code
stringlengths 82
53.2k
| code_codestyle
int64 0
721
| style_context
stringlengths 91
41.9k
| style_context_codestyle
int64 0
699
| label
int64 0
1
|
|---|---|---|---|---|
'''simple docstring'''
from math import factorial
def __a ( lowerCAmelCase__ : int = 100 ):
return sum(map(lowerCAmelCase__ , str(factorial(lowerCAmelCase__ ) ) ) )
if __name__ == "__main__":
print(solution(int(input('Enter the Number: ').strip())))
| 688
|
import argparse
import torch
from transformers import MobileBertConfig, MobileBertForPreTraining, load_tf_weights_in_mobilebert
from transformers.utils import logging
logging.set_verbosity_info()
def _lowerCAmelCase ( __magic_name__ :Union[str, Any] , __magic_name__ :Union[str, Any] , __magic_name__ :List[Any] ):
# Initialise PyTorch model
UpperCAmelCase_ = MobileBertConfig.from_json_file(__magic_name__ )
print(F'''Building PyTorch model from configuration: {config}''' )
UpperCAmelCase_ = MobileBertForPreTraining(__magic_name__ )
# Load weights from tf checkpoint
UpperCAmelCase_ = load_tf_weights_in_mobilebert(__magic_name__ , __magic_name__ , __magic_name__ )
# Save pytorch-model
print(F'''Save PyTorch model to {pytorch_dump_path}''' )
torch.save(model.state_dict() , __magic_name__ )
if __name__ == "__main__":
_lowerCamelCase : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--mobilebert_config_file',
default=None,
type=str,
required=True,
help=(
'The config json file corresponding to the pre-trained MobileBERT model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
_lowerCamelCase : List[Any] = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.mobilebert_config_file, args.pytorch_dump_path)
| 121
| 0
|
"""simple docstring"""
# Lint as: python3
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING, Dict, Optional
import numpy as np
import pyarrow as pa
from .. import config
from ..utils.logging import get_logger
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter
if TYPE_CHECKING:
import jax
import jaxlib
_a : Optional[int] = get_logger()
_a : Optional[dict] = None
class _UpperCAmelCase ( TensorFormatter[Mapping, """jax.Array""", Mapping]):
def __init__( self , snake_case_=None , snake_case_=None , **snake_case_ ):
super().__init__(features=snake_case_ )
import jax
from jaxlib.xla_client import Device
if isinstance(snake_case_ , snake_case_ ):
raise ValueError(
F'Expected {device} to be a `str` not {type(snake_case_ )}, as `jaxlib.xla_extension.Device` '
"is not serializable neither with `pickle` nor with `dill`. Instead you can surround "
"the device with `str()` to get its string identifier that will be internally mapped "
"to the actual `jaxlib.xla_extension.Device`." )
_snake_case : Union[str, Any] = device if isinstance(snake_case_ , snake_case_ ) else str(jax.devices()[0] )
# using global variable since `jaxlib.xla_extension.Device` is not serializable neither
# with `pickle` nor with `dill`, so we need to use a global variable instead
global DEVICE_MAPPING
if DEVICE_MAPPING is None:
_snake_case : Tuple = self._map_devices_to_str()
if self.device not in list(DEVICE_MAPPING.keys() ):
logger.warning(
F'Device with string identifier {self.device} not listed among the available '
F'devices: {list(DEVICE_MAPPING.keys() )}, so falling back to the default '
F'device: {str(jax.devices()[0] )}.' )
_snake_case : Dict = str(jax.devices()[0] )
_snake_case : List[Any] = jnp_array_kwargs
@staticmethod
def lowerCamelCase__ ( ):
import jax
return {str(snake_case_ ): device for device in jax.devices()}
def lowerCamelCase__ ( self , snake_case_ ):
import jax
import jax.numpy as jnp
if isinstance(snake_case_ , snake_case_ ) and column:
if all(
isinstance(snake_case_ , jax.Array ) and x.shape == column[0].shape and x.dtype == column[0].dtype for x in column ):
return jnp.stack(snake_case_ , axis=0 )
return column
def lowerCamelCase__ ( self , snake_case_ ):
import jax
import jax.numpy as jnp
if isinstance(snake_case_ , (str, bytes, type(snake_case_ )) ):
return value
elif isinstance(snake_case_ , (np.character, np.ndarray) ) and np.issubdtype(value.dtype , np.character ):
return value.tolist()
_snake_case : Union[str, Any] = {}
if isinstance(snake_case_ , (np.number, np.ndarray) ) and np.issubdtype(value.dtype , np.integer ):
# the default int precision depends on the jax config
# see https://jax.readthedocs.io/en/latest/notebooks/Common_Gotchas_in_JAX.html#double-64bit-precision
if jax.config.jax_enable_xaa:
_snake_case : Optional[int] = {"dtype": jnp.intaa}
else:
_snake_case : Union[str, Any] = {"dtype": jnp.intaa}
elif isinstance(snake_case_ , (np.number, np.ndarray) ) and np.issubdtype(value.dtype , np.floating ):
_snake_case : List[str] = {"dtype": jnp.floataa}
elif config.PIL_AVAILABLE and "PIL" in sys.modules:
import PIL.Image
if isinstance(snake_case_ , PIL.Image.Image ):
_snake_case : Optional[Any] = np.asarray(snake_case_ )
# using global variable since `jaxlib.xla_extension.Device` is not serializable neither
# with `pickle` nor with `dill`, so we need to use a global variable instead
global DEVICE_MAPPING
if DEVICE_MAPPING is None:
_snake_case : str = self._map_devices_to_str()
with jax.default_device(DEVICE_MAPPING[self.device] ):
# calling jnp.array on a np.ndarray does copy the data
# see https://github.com/google/jax/issues/4486
return jnp.array(snake_case_ , **{**default_dtype, **self.jnp_array_kwargs} )
def lowerCamelCase__ ( self , snake_case_ ):
import jax
# support for torch, tf, jax etc.
if config.TORCH_AVAILABLE and "torch" in sys.modules:
import torch
if isinstance(snake_case_ , torch.Tensor ):
return self._tensorize(data_struct.detach().cpu().numpy()[()] )
if hasattr(snake_case_ , "__array__" ) and not isinstance(snake_case_ , jax.Array ):
_snake_case : Tuple = data_struct.__array__()
# support for nested types like struct of list of struct
if isinstance(snake_case_ , np.ndarray ):
if data_struct.dtype == object: # jax arrays cannot be instantied from an array of objects
return self._consolidate([self.recursive_tensorize(snake_case_ ) for substruct in data_struct] )
elif isinstance(snake_case_ , (list, tuple) ):
return self._consolidate([self.recursive_tensorize(snake_case_ ) for substruct in data_struct] )
return self._tensorize(snake_case_ )
def lowerCamelCase__ ( self , snake_case_ ):
return map_nested(self._recursive_tensorize , snake_case_ , map_list=snake_case_ )
def lowerCamelCase__ ( self , snake_case_ ):
_snake_case : List[str] = self.numpy_arrow_extractor().extract_row(snake_case_ )
_snake_case : Optional[int] = self.python_features_decoder.decode_row(snake_case_ )
return self.recursive_tensorize(snake_case_ )
def lowerCamelCase__ ( self , snake_case_ ):
_snake_case : Any = self.numpy_arrow_extractor().extract_column(snake_case_ )
_snake_case : Any = self.python_features_decoder.decode_column(snake_case_ , pa_table.column_names[0] )
_snake_case : Any = self.recursive_tensorize(snake_case_ )
_snake_case : str = self._consolidate(snake_case_ )
return column
def lowerCamelCase__ ( self , snake_case_ ):
_snake_case : int = self.numpy_arrow_extractor().extract_batch(snake_case_ )
_snake_case : Optional[int] = self.python_features_decoder.decode_batch(snake_case_ )
_snake_case : Union[str, Any] = self.recursive_tensorize(snake_case_ )
for column_name in batch:
_snake_case : int = self._consolidate(batch[column_name] )
return batch
| 87
|
"""simple docstring"""
import numpy as np
import torch
from torch.utils.data import Dataset, IterableDataset
from ..utils.generic import ModelOutput
class _UpperCAmelCase ( _snake_case):
def __init__( self , snake_case_ , snake_case_ , snake_case_ ):
_snake_case : Optional[int] = dataset
_snake_case : str = process
_snake_case : int = params
def __len__( self ):
return len(self.dataset )
def __getitem__( self , snake_case_ ):
_snake_case : Union[str, Any] = self.dataset[i]
_snake_case : Optional[Any] = self.process(snake_case_ , **self.params )
return processed
class _UpperCAmelCase ( _snake_case):
def __init__( self , snake_case_ , snake_case_ , snake_case_ , snake_case_=None ):
_snake_case : Union[str, Any] = loader
_snake_case : Tuple = infer
_snake_case : List[Any] = params
if loader_batch_size == 1:
# Let's spare some time by deactivating altogether
_snake_case : int = None
_snake_case : int = loader_batch_size
# Internal bookkeeping
_snake_case : Any = None
_snake_case : Dict = None
def __len__( self ):
return len(self.loader )
def __iter__( self ):
_snake_case : int = iter(self.loader )
return self
def lowerCamelCase__ ( self ):
if isinstance(self._loader_batch_data , torch.Tensor ):
# Batch data is simple tensor, just fetch the slice
_snake_case : List[Any] = self._loader_batch_data[self._loader_batch_index]
else:
# Batch data is assumed to be BaseModelOutput (or dict)
_snake_case : int = {}
for k, element in self._loader_batch_data.items():
if isinstance(snake_case_ , snake_case_ ):
# Convert ModelOutput to tuple first
_snake_case : Tuple = element.to_tuple()
if isinstance(element[0] , torch.Tensor ):
_snake_case : Union[str, Any] = tuple(el[self._loader_batch_index].unsqueeze(0 ) for el in element )
elif isinstance(element[0] , np.ndarray ):
_snake_case : int = tuple(np.expand_dims(el[self._loader_batch_index] , 0 ) for el in element )
continue
if k in {"hidden_states", "past_key_values", "attentions"} and isinstance(snake_case_ , snake_case_ ):
# Those are stored as lists of tensors so need specific unbatching.
if isinstance(element[0] , torch.Tensor ):
_snake_case : Tuple = tuple(el[self._loader_batch_index].unsqueeze(0 ) for el in element )
elif isinstance(element[0] , np.ndarray ):
_snake_case : Tuple = tuple(np.expand_dims(el[self._loader_batch_index] , 0 ) for el in element )
continue
if element is None:
# This can happen for optional data that get passed around
_snake_case : Tuple = None
elif isinstance(element[self._loader_batch_index] , torch.Tensor ):
# Take correct batch data, but make it looked like batch_size=1
# For compatibility with other methods within transformers
_snake_case : List[Any] = element[self._loader_batch_index].unsqueeze(0 )
elif isinstance(element[self._loader_batch_index] , np.ndarray ):
# Take correct batch data, but make it looked like batch_size=1
# For compatibility with other methods within transformers
_snake_case : Union[str, Any] = np.expand_dims(element[self._loader_batch_index] , 0 )
else:
# This is typically a list, so no need to `unsqueeze`.
_snake_case : List[Any] = element[self._loader_batch_index]
# Recreate the element by reusing the original class to make it look
# batch_size=1
_snake_case : int = self._loader_batch_data.__class__(snake_case_ )
self._loader_batch_index += 1
return result
def lowerCamelCase__ ( self ):
if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size:
# We are currently unrolling a batch so we just need to return
# the current item within a batch
return self.loader_batch_item()
# We're out of items within a batch
_snake_case : Tuple = next(self.iterator )
_snake_case : Any = self.infer(snake_case_ , **self.params )
# We now have a batch of "inferred things".
if self.loader_batch_size is not None:
# Try to infer the size of the batch
if isinstance(snake_case_ , torch.Tensor ):
_snake_case : Union[str, Any] = processed
else:
_snake_case : Optional[int] = list(processed.keys() )[0]
_snake_case : List[str] = processed[key]
if isinstance(snake_case_ , snake_case_ ):
_snake_case : Dict = len(snake_case_ )
else:
_snake_case : Optional[int] = first_tensor.shape[0]
if 0 < observed_batch_size < self.loader_batch_size:
# could be last batch so we can't unroll as many
# elements.
_snake_case : Union[str, Any] = observed_batch_size
# Setting internal index to unwrap the batch
_snake_case : str = processed
_snake_case : List[Any] = 0
return self.loader_batch_item()
else:
# We're not unrolling batches
return processed
class _UpperCAmelCase ( _snake_case):
def __init__( self , snake_case_ , snake_case_ , snake_case_ , snake_case_=None ):
super().__init__(snake_case_ , snake_case_ , snake_case_ )
def __iter__( self ):
_snake_case : Tuple = iter(self.loader )
_snake_case : List[Any] = None
return self
def lowerCamelCase__ ( self ):
if self.subiterator is None:
_snake_case : Optional[Any] = self.infer(next(self.iterator ) , **self.params )
try:
# Try to return next item
_snake_case : Union[str, Any] = next(self.subiterator )
except StopIteration:
# When a preprocess iterator ends, we can start lookig at the next item
# ChunkIterator will keep feeding until ALL elements of iterator
# all have created their subiterator and have been iterating against.
#
# Another way to look at it, is we're basically flattening lists of lists
# into a single list, but with generators
_snake_case : str = self.infer(next(self.iterator ) , **self.params )
_snake_case : Tuple = next(self.subiterator )
return processed
class _UpperCAmelCase ( _snake_case):
def __iter__( self ):
_snake_case : Optional[Any] = iter(self.loader )
return self
def lowerCamelCase__ ( self ):
# Extremely similar to PipelineIterator in its unpacking mechanism
# BUT, we have an extra required item which is the presence of `is_last`
# That is because everything is flattened by `PipelineChunkIterator` we
# need to keep track of how to regroup here in the original `process`
# boundaries so that `process` and `postprocess` see the same data.
# This iterator accumulates items (possibly while unbatching) until it
# its a `is_last` and then just passes it on to the caller.
_snake_case : Optional[Any] = False
_snake_case : Tuple = []
if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size:
while self._loader_batch_index < self.loader_batch_size:
_snake_case : Union[str, Any] = self.loader_batch_item()
_snake_case : str = item.pop("is_last" )
accumulator.append(snake_case_ )
if is_last:
return accumulator
while not is_last:
_snake_case : List[str] = self.infer(next(self.iterator ) , **self.params )
if self.loader_batch_size is not None:
if isinstance(snake_case_ , torch.Tensor ):
_snake_case : Union[str, Any] = processed
else:
_snake_case : Tuple = list(processed.keys() )[0]
_snake_case : Tuple = processed[key]
if isinstance(snake_case_ , snake_case_ ):
_snake_case : Any = len(snake_case_ )
else:
_snake_case : List[Any] = first_tensor.shape[0]
if 0 < observed_batch_size < self.loader_batch_size:
# could be last batch so we can't unroll as many
# elements.
_snake_case : Dict = observed_batch_size
_snake_case : List[Any] = processed
_snake_case : List[str] = 0
while self._loader_batch_index < self.loader_batch_size:
_snake_case : Union[str, Any] = self.loader_batch_item()
_snake_case : int = item.pop("is_last" )
accumulator.append(snake_case_ )
if is_last:
return accumulator
else:
_snake_case : Dict = processed
_snake_case : Dict = item.pop("is_last" )
accumulator.append(snake_case_ )
return accumulator
class _UpperCAmelCase ( _snake_case):
def __init__( self , snake_case_ , snake_case_ ):
_snake_case : str = dataset
_snake_case : Any = key
def __len__( self ):
return len(self.dataset )
def __getitem__( self , snake_case_ ):
return self.dataset[i][self.key]
class _UpperCAmelCase ( _snake_case):
def __init__( self , snake_case_ , snake_case_ , snake_case_ ):
_snake_case : int = dataset
_snake_case : Any = keya
_snake_case : int = keya
def __len__( self ):
return len(self.dataset )
def __getitem__( self , snake_case_ ):
return {"text": self.dataset[i][self.keya], "text_pair": self.dataset[i][self.keya]}
| 87
| 1
|
"""simple docstring"""
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple
from transformers import AddedToken, BatchEncoding, ByTaTokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
if is_torch_available():
__lowercase : Optional[int] = """pt"""
elif is_tf_available():
__lowercase : List[Any] = """tf"""
else:
__lowercase : Optional[int] = """jax"""
class lowerCAmelCase ( a , unittest.TestCase ):
"""simple docstring"""
__lowercase :List[Any] = ByTaTokenizer
__lowercase :List[Any] = False
def _lowerCAmelCase ( self ) -> Optional[int]:
'''simple docstring'''
super().setUp()
lowerCamelCase_ = ByTaTokenizer()
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def _lowerCAmelCase ( self ) -> List[Any]:
'''simple docstring'''
return ByTaTokenizer.from_pretrained('''google/byt5-small''' )
def _lowerCAmelCase ( self , **UpperCamelCase__ ) -> ByTaTokenizer:
'''simple docstring'''
return self.tokenizer_class.from_pretrained(self.tmpdirname , **UpperCamelCase__ )
def _lowerCAmelCase ( self , UpperCamelCase__ , UpperCamelCase__=False , UpperCamelCase__=20 , UpperCamelCase__=5 ) -> Tuple[str, list]:
'''simple docstring'''
lowerCamelCase_ = []
for i in range(len(UpperCamelCase__ ) ):
try:
lowerCamelCase_ = tokenizer.decode([i] , clean_up_tokenization_spaces=UpperCamelCase__ )
except UnicodeDecodeError:
pass
toks.append((i, tok) )
lowerCamelCase_ = list(filter(lambda UpperCamelCase__ : re.match(r'''^[ a-zA-Z]+$''' , t[1] ) , UpperCamelCase__ ) )
lowerCamelCase_ = list(filter(lambda UpperCamelCase__ : [t[0]] == tokenizer.encode(t[1] , add_special_tokens=UpperCamelCase__ ) , UpperCamelCase__ ) )
if max_length is not None and len(UpperCamelCase__ ) > max_length:
lowerCamelCase_ = toks[:max_length]
if min_length is not None and len(UpperCamelCase__ ) < min_length and len(UpperCamelCase__ ) > 0:
while len(UpperCamelCase__ ) < min_length:
lowerCamelCase_ = toks + toks
# toks_str = [t[1] for t in toks]
lowerCamelCase_ = [t[0] for t in toks]
# Ensure consistency
lowerCamelCase_ = tokenizer.decode(UpperCamelCase__ , clean_up_tokenization_spaces=UpperCamelCase__ )
if " " not in output_txt and len(UpperCamelCase__ ) > 1:
lowerCamelCase_ = (
tokenizer.decode([toks_ids[0]] , clean_up_tokenization_spaces=UpperCamelCase__ )
+ ''' '''
+ tokenizer.decode(toks_ids[1:] , clean_up_tokenization_spaces=UpperCamelCase__ )
)
if with_prefix_space:
lowerCamelCase_ = ''' ''' + output_txt
lowerCamelCase_ = tokenizer.encode(UpperCamelCase__ , add_special_tokens=UpperCamelCase__ )
return output_txt, output_ids
def _lowerCAmelCase ( self ) -> List[str]:
'''simple docstring'''
lowerCamelCase_ = self.ta_base_tokenizer
lowerCamelCase_ = tokenizer(['''hi</s>''', '''I went to the gym</s>''', '''</s>'''] )
lowerCamelCase_ = tokenizer(['''hi''', '''I went to the gym''', ''''''] )
self.assertListEqual(batch_with_eos_added['''input_ids'''] , batch_without_eos_added['''input_ids'''] )
def _lowerCAmelCase ( self ) -> Any:
'''simple docstring'''
lowerCamelCase_ = self.ta_base_tokenizer
lowerCamelCase_ = '''Unicode €.'''
lowerCamelCase_ = tokenizer(UpperCamelCase__ )
lowerCamelCase_ = [88, 113, 108, 102, 114, 103, 104, 35, 229, 133, 175, 49, 1]
self.assertEqual(encoded['''input_ids'''] , UpperCamelCase__ )
# decoding
lowerCamelCase_ = tokenizer.decode(UpperCamelCase__ )
self.assertEqual(UpperCamelCase__ , '''Unicode €.</s>''' )
lowerCamelCase_ = tokenizer('''e è é ê ë''' )
lowerCamelCase_ = [104, 35, 198, 171, 35, 198, 172, 35, 198, 173, 35, 198, 174, 1]
self.assertEqual(encoded['''input_ids'''] , UpperCamelCase__ )
# decoding
lowerCamelCase_ = tokenizer.decode(UpperCamelCase__ )
self.assertEqual(UpperCamelCase__ , '''e è é ê ë</s>''' )
# encode/decode, but with `encode` instead of `__call__`
self.assertEqual(tokenizer.decode(tokenizer.encode('''e è é ê ë''' ) ) , '''e è é ê ë</s>''' )
def _lowerCAmelCase ( self ) -> int:
'''simple docstring'''
lowerCamelCase_ = self.ta_base_tokenizer
lowerCamelCase_ = ['''A long paragraph for summarization.''', '''Another paragraph for summarization.''']
# fmt: off
lowerCamelCase_ = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 1, 0]
# fmt: on
lowerCamelCase_ = tokenizer(UpperCamelCase__ , padding=UpperCamelCase__ , return_tensors=UpperCamelCase__ )
self.assertIsInstance(UpperCamelCase__ , UpperCamelCase__ )
if FRAMEWORK != "jax":
lowerCamelCase_ = list(batch.input_ids.numpy()[0] )
else:
lowerCamelCase_ = list(batch.input_ids.tolist()[0] )
self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ )
self.assertEqual((2, 37) , batch.input_ids.shape )
self.assertEqual((2, 37) , batch.attention_mask.shape )
def _lowerCAmelCase ( self ) -> Any:
'''simple docstring'''
lowerCamelCase_ = self.ta_base_tokenizer
lowerCamelCase_ = ['''A long paragraph for summarization.''', '''Another paragraph for summarization.''']
lowerCamelCase_ = tokenizer(UpperCamelCase__ , padding=UpperCamelCase__ , return_tensors=UpperCamelCase__ )
# check if input_ids are returned and no decoder_input_ids
self.assertIn('''input_ids''' , UpperCamelCase__ )
self.assertIn('''attention_mask''' , UpperCamelCase__ )
self.assertNotIn('''decoder_input_ids''' , UpperCamelCase__ )
self.assertNotIn('''decoder_attention_mask''' , UpperCamelCase__ )
def _lowerCAmelCase ( self ) -> Dict:
'''simple docstring'''
lowerCamelCase_ = self.ta_base_tokenizer
lowerCamelCase_ = [
'''Summary of the text.''',
'''Another summary.''',
]
lowerCamelCase_ = tokenizer(
text_target=UpperCamelCase__ , max_length=32 , padding='''max_length''' , truncation=UpperCamelCase__ , return_tensors=UpperCamelCase__ )
self.assertEqual(32 , targets['''input_ids'''].shape[1] )
def _lowerCAmelCase ( self ) -> Optional[Any]:
'''simple docstring'''
lowerCamelCase_ = self.ta_base_tokenizer
lowerCamelCase_ = ['''A long paragraph for summarization. </s>''']
lowerCamelCase_ = ['''Summary of the text. </s>''']
# fmt: off
lowerCamelCase_ = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 35, 1]
lowerCamelCase_ = [86, 120, 112, 112, 100, 117, 124, 35, 114, 105, 35, 119, 107, 104, 35, 119, 104, 123, 119, 49, 35, 1]
# fmt: on
lowerCamelCase_ = tokenizer(UpperCamelCase__ , text_target=UpperCamelCase__ )
self.assertEqual(UpperCamelCase__ , batch['''input_ids'''][0] )
self.assertEqual(UpperCamelCase__ , batch['''labels'''][0] )
def _lowerCAmelCase ( self ) -> Dict:
'''simple docstring'''
lowerCamelCase_ = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
self.assertNotEqual(tokenizer.model_max_length , 42 )
# Now let's start the test
lowerCamelCase_ = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
# Isolate this from the other tests because we save additional tokens/etc
lowerCamelCase_ = tempfile.mkdtemp()
lowerCamelCase_ = ''' He is very happy, UNwant\u00E9d,running'''
lowerCamelCase_ = tokenizer.encode(UpperCamelCase__ , add_special_tokens=UpperCamelCase__ )
tokenizer.save_pretrained(UpperCamelCase__ )
lowerCamelCase_ = tokenizer.__class__.from_pretrained(UpperCamelCase__ )
lowerCamelCase_ = after_tokenizer.encode(UpperCamelCase__ , add_special_tokens=UpperCamelCase__ )
self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ )
shutil.rmtree(UpperCamelCase__ )
lowerCamelCase_ = self.get_tokenizers(model_max_length=42 )
for tokenizer in tokenizers:
with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
# Isolate this from the other tests because we save additional tokens/etc
lowerCamelCase_ = tempfile.mkdtemp()
lowerCamelCase_ = ''' He is very happy, UNwant\u00E9d,running'''
tokenizer.add_tokens(['''bim''', '''bambam'''] )
lowerCamelCase_ = tokenizer.additional_special_tokens
additional_special_tokens.append('''new_additional_special_token''' )
tokenizer.add_special_tokens({'''additional_special_tokens''': additional_special_tokens} )
lowerCamelCase_ = tokenizer.encode(UpperCamelCase__ , add_special_tokens=UpperCamelCase__ )
tokenizer.save_pretrained(UpperCamelCase__ )
lowerCamelCase_ = tokenizer.__class__.from_pretrained(UpperCamelCase__ )
lowerCamelCase_ = after_tokenizer.encode(UpperCamelCase__ , add_special_tokens=UpperCamelCase__ )
self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ )
self.assertIn('''new_additional_special_token''' , after_tokenizer.additional_special_tokens )
self.assertEqual(after_tokenizer.model_max_length , 42 )
lowerCamelCase_ = tokenizer.__class__.from_pretrained(UpperCamelCase__ , model_max_length=43 )
self.assertEqual(tokenizer.model_max_length , 43 )
shutil.rmtree(UpperCamelCase__ )
def _lowerCAmelCase ( self ) -> List[str]:
'''simple docstring'''
lowerCamelCase_ = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(UpperCamelCase__ )
with open(os.path.join(UpperCamelCase__ , '''special_tokens_map.json''' ) , encoding='''utf-8''' ) as json_file:
lowerCamelCase_ = json.load(UpperCamelCase__ )
with open(os.path.join(UpperCamelCase__ , '''tokenizer_config.json''' ) , encoding='''utf-8''' ) as json_file:
lowerCamelCase_ = json.load(UpperCamelCase__ )
lowerCamelCase_ = [F"""<extra_id_{i}>""" for i in range(125 )]
lowerCamelCase_ = added_tokens_extra_ids + [
'''an_additional_special_token'''
]
lowerCamelCase_ = added_tokens_extra_ids + [
'''an_additional_special_token'''
]
with open(os.path.join(UpperCamelCase__ , '''special_tokens_map.json''' ) , '''w''' , encoding='''utf-8''' ) as outfile:
json.dump(UpperCamelCase__ , UpperCamelCase__ )
with open(os.path.join(UpperCamelCase__ , '''tokenizer_config.json''' ) , '''w''' , encoding='''utf-8''' ) as outfile:
json.dump(UpperCamelCase__ , UpperCamelCase__ )
# the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
# into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
# "special_tokens_map.json" files
lowerCamelCase_ = tokenizer_class.from_pretrained(
UpperCamelCase__ , )
self.assertIn(
'''an_additional_special_token''' , tokenizer_without_change_in_init.additional_special_tokens )
# self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab
self.assertEqual(
['''an_additional_special_token'''] , tokenizer_without_change_in_init.convert_ids_to_tokens(
tokenizer_without_change_in_init.convert_tokens_to_ids(['''an_additional_special_token'''] ) ) , )
# Now we test that we can change the value of additional_special_tokens in the from_pretrained
lowerCamelCase_ = added_tokens_extra_ids + [AddedToken('''a_new_additional_special_token''' , lstrip=UpperCamelCase__ )]
lowerCamelCase_ = tokenizer_class.from_pretrained(
UpperCamelCase__ , additional_special_tokens=UpperCamelCase__ , )
self.assertIn('''a_new_additional_special_token''' , tokenizer.additional_special_tokens )
self.assertEqual(
['''a_new_additional_special_token'''] , tokenizer.convert_ids_to_tokens(
tokenizer.convert_tokens_to_ids(['''a_new_additional_special_token'''] ) ) , )
def _lowerCAmelCase ( self ) -> Optional[int]:
'''simple docstring'''
lowerCamelCase_ = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(UpperCamelCase__ )
lowerCamelCase_ = tokenizer_class.from_pretrained(UpperCamelCase__ )
self.assertTrue(tokenizer.decode([255] ) == '''''' )
def _lowerCAmelCase ( self ) -> Union[str, Any]:
'''simple docstring'''
pass
def _lowerCAmelCase ( self ) -> Optional[int]:
'''simple docstring'''
pass
def _lowerCAmelCase ( self ) -> Any:
'''simple docstring'''
pass
def _lowerCAmelCase ( self ) -> str:
'''simple docstring'''
pass
def _lowerCAmelCase ( self ) -> Any:
'''simple docstring'''
lowerCamelCase_ = self.get_tokenizers(fast=UpperCamelCase__ , do_lower_case=UpperCamelCase__ )
for tokenizer in tokenizers:
with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
lowerCamelCase_ = ['''t''', '''h''', '''i''', '''s''', ''' ''', '''i''', '''s''', ''' ''', '''a''', ''' ''', '''t''', '''e''', '''x''', '''t''', '''</s>''']
lowerCamelCase_ = tokenizer.convert_tokens_to_string(UpperCamelCase__ )
self.assertIsInstance(UpperCamelCase__ , UpperCamelCase__ )
def _lowerCAmelCase ( self ) -> str:
'''simple docstring'''
lowerCamelCase_ = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
lowerCamelCase_ = [
'''bos_token''',
'''eos_token''',
'''unk_token''',
'''sep_token''',
'''pad_token''',
'''cls_token''',
'''mask_token''',
]
lowerCamelCase_ = 0
lowerCamelCase_ = tokenizer.convert_ids_to_tokens(
UpperCamelCase__ , skip_special_tokens=UpperCamelCase__ )
for attr in attributes_list:
setattr(UpperCamelCase__ , attr + '''_id''' , UpperCamelCase__ )
self.assertEqual(getattr(UpperCamelCase__ , UpperCamelCase__ ) , UpperCamelCase__ )
self.assertEqual(getattr(UpperCamelCase__ , attr + '''_id''' ) , UpperCamelCase__ )
setattr(UpperCamelCase__ , attr + '''_id''' , UpperCamelCase__ )
self.assertEqual(getattr(UpperCamelCase__ , UpperCamelCase__ ) , UpperCamelCase__ )
self.assertEqual(getattr(UpperCamelCase__ , attr + '''_id''' ) , UpperCamelCase__ )
setattr(UpperCamelCase__ , '''additional_special_tokens_ids''' , [] )
self.assertListEqual(getattr(UpperCamelCase__ , '''additional_special_tokens''' ) , [] )
self.assertListEqual(getattr(UpperCamelCase__ , '''additional_special_tokens_ids''' ) , [] )
setattr(UpperCamelCase__ , '''additional_special_tokens_ids''' , [token_id_to_test_setters] )
self.assertListEqual(getattr(UpperCamelCase__ , '''additional_special_tokens''' ) , [token_to_test_setters] )
self.assertListEqual(getattr(UpperCamelCase__ , '''additional_special_tokens_ids''' ) , [token_id_to_test_setters] )
| 142
|
"""simple docstring"""
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class lowerCAmelCase ( a ):
"""simple docstring"""
__lowercase :Optional[int] = ["image_processor", "tokenizer"]
__lowercase :int = "ChineseCLIPImageProcessor"
__lowercase :Union[str, Any] = ("BertTokenizer", "BertTokenizerFast")
def __init__( self , UpperCamelCase__=None , UpperCamelCase__=None , **UpperCamelCase__ ) -> Dict:
'''simple docstring'''
lowerCamelCase_ = None
if "feature_extractor" in kwargs:
warnings.warn(
'''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
''' instead.''' , UpperCamelCase__ , )
lowerCamelCase_ = kwargs.pop('''feature_extractor''' )
lowerCamelCase_ = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('''You need to specify an `image_processor`.''' )
if tokenizer is None:
raise ValueError('''You need to specify a `tokenizer`.''' )
super().__init__(UpperCamelCase__ , UpperCamelCase__ )
lowerCamelCase_ = self.image_processor
def __call__( self , UpperCamelCase__=None , UpperCamelCase__=None , UpperCamelCase__=None , **UpperCamelCase__ ) -> List[str]:
'''simple docstring'''
if text is None and images is None:
raise ValueError('''You have to specify either text or images. Both cannot be none.''' )
if text is not None:
lowerCamelCase_ = self.tokenizer(UpperCamelCase__ , return_tensors=UpperCamelCase__ , **UpperCamelCase__ )
if images is not None:
lowerCamelCase_ = self.image_processor(UpperCamelCase__ , return_tensors=UpperCamelCase__ , **UpperCamelCase__ )
if text is not None and images is not None:
lowerCamelCase_ = image_features.pixel_values
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**UpperCamelCase__ ) , tensor_type=UpperCamelCase__ )
def _lowerCAmelCase ( self , *UpperCamelCase__ , **UpperCamelCase__ ) -> List[Any]:
'''simple docstring'''
return self.tokenizer.batch_decode(*UpperCamelCase__ , **UpperCamelCase__ )
def _lowerCAmelCase ( self , *UpperCamelCase__ , **UpperCamelCase__ ) -> List[str]:
'''simple docstring'''
return self.tokenizer.decode(*UpperCamelCase__ , **UpperCamelCase__ )
@property
def _lowerCAmelCase ( self ) -> List[str]:
'''simple docstring'''
lowerCamelCase_ = self.tokenizer.model_input_names
lowerCamelCase_ = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
@property
def _lowerCAmelCase ( self ) -> Tuple:
'''simple docstring'''
warnings.warn(
'''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''' , UpperCamelCase__ , )
return self.image_processor_class
| 142
| 1
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
snake_case_ : Union[str, Any] = {
"configuration_gpt_bigcode": ["GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTBigCodeConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case_ : str = [
"GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST",
"GPTBigCodeForSequenceClassification",
"GPTBigCodeForTokenClassification",
"GPTBigCodeForCausalLM",
"GPTBigCodeModel",
"GPTBigCodePreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_gpt_bigcode import GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTBigCodeConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_bigcode import (
GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTBigCodeForCausalLM,
GPTBigCodeForSequenceClassification,
GPTBigCodeForTokenClassification,
GPTBigCodeModel,
GPTBigCodePreTrainedModel,
)
else:
import sys
snake_case_ : Optional[Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 253
|
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_lxmert import LxmertTokenizer
snake_case_ : Union[str, Any] = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
snake_case_ : Dict = {
"vocab_file": {
"unc-nlp/lxmert-base-uncased": "https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/vocab.txt",
},
"tokenizer_file": {
"unc-nlp/lxmert-base-uncased": (
"https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/tokenizer.json"
),
},
}
snake_case_ : Union[str, Any] = {
"unc-nlp/lxmert-base-uncased": 512,
}
snake_case_ : Any = {
"unc-nlp/lxmert-base-uncased": {"do_lower_case": True},
}
class snake_case_ ( __A ):
'''simple docstring'''
lowerCamelCase = VOCAB_FILES_NAMES
lowerCamelCase = PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase = PRETRAINED_INIT_CONFIGURATION
lowerCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCamelCase = LxmertTokenizer
def __init__( self : int , __magic_name__ : Union[str, Any]=None , __magic_name__ : Tuple=None , __magic_name__ : Optional[int]=True , __magic_name__ : Optional[Any]="[UNK]" , __magic_name__ : str="[SEP]" , __magic_name__ : int="[PAD]" , __magic_name__ : Tuple="[CLS]" , __magic_name__ : Any="[MASK]" , __magic_name__ : str=True , __magic_name__ : int=None , **__magic_name__ : Any , ) -> str:
super().__init__(
__magic_name__ , tokenizer_file=__magic_name__ , do_lower_case=__magic_name__ , unk_token=__magic_name__ , sep_token=__magic_name__ , pad_token=__magic_name__ , cls_token=__magic_name__ , mask_token=__magic_name__ , tokenize_chinese_chars=__magic_name__ , strip_accents=__magic_name__ , **__magic_name__ , )
lowerCamelCase_ : List[Any] = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get("lowercase" , __magic_name__ ) != do_lower_case
or normalizer_state.get("strip_accents" , __magic_name__ ) != strip_accents
or normalizer_state.get("handle_chinese_chars" , __magic_name__ ) != tokenize_chinese_chars
):
lowerCamelCase_ : int = getattr(__magic_name__ , normalizer_state.pop("type" ) )
lowerCamelCase_ : Optional[int] = do_lower_case
lowerCamelCase_ : int = strip_accents
lowerCamelCase_ : Union[str, Any] = tokenize_chinese_chars
lowerCamelCase_ : Optional[Any] = normalizer_class(**__magic_name__ )
lowerCamelCase_ : int = do_lower_case
def __SCREAMING_SNAKE_CASE ( self : Any , __magic_name__ : str , __magic_name__ : Dict=None ) -> Dict:
lowerCamelCase_ : Any = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def __SCREAMING_SNAKE_CASE ( self : Any , __magic_name__ : List[int] , __magic_name__ : Optional[List[int]] = None ) -> List[int]:
lowerCamelCase_ : int = [self.sep_token_id]
lowerCamelCase_ : List[str] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def __SCREAMING_SNAKE_CASE ( self : List[Any] , __magic_name__ : str , __magic_name__ : Optional[str] = None ) -> Tuple[str]:
lowerCamelCase_ : int = self._tokenizer.model.save(__magic_name__ , name=__magic_name__ )
return tuple(__magic_name__ )
| 253
| 1
|
"""simple docstring"""
import argparse
import json
import os
import time
import zipfile
from get_ci_error_statistics import download_artifact, get_artifacts_links
from transformers import logging
a = logging.get_logger(__name__)
def _snake_case ( _snake_case : List[str] , _snake_case : Any ) -> Dict:
'''simple docstring'''
_A = set()
_A = []
def parse_line(_snake_case : Optional[Any] ):
for line in fp:
if isinstance(_snake_case , _snake_case ):
_A = line.decode('UTF-8' )
if "warnings summary (final)" in line:
continue
# This means we are outside the body of a warning
elif not line.startswith(' ' ):
# process a single warning and move it to `selected_warnings`.
if len(_snake_case ) > 0:
_A = '\n'.join(_snake_case )
# Only keep the warnings specified in `targets`
if any(F''': {x}: ''' in warning for x in targets ):
selected_warnings.add(_snake_case )
buffer.clear()
continue
else:
_A = line.strip()
buffer.append(_snake_case )
if from_gh:
for filename in os.listdir(_snake_case ):
_A = os.path.join(_snake_case , _snake_case )
if not os.path.isdir(_snake_case ):
# read the file
if filename != "warnings.txt":
continue
with open(_snake_case ) as fp:
parse_line(_snake_case )
else:
try:
with zipfile.ZipFile(_snake_case ) as z:
for filename in z.namelist():
if not os.path.isdir(_snake_case ):
# read the file
if filename != "warnings.txt":
continue
with z.open(_snake_case ) as fp:
parse_line(_snake_case )
except Exception:
logger.warning(
F'''{artifact_path} is either an invalid zip file or something else wrong. This file is skipped.''' )
return selected_warnings
def _snake_case ( _snake_case : Union[str, Any] , _snake_case : List[str] ) -> Dict:
'''simple docstring'''
_A = set()
_A = [os.path.join(_snake_case , _snake_case ) for p in os.listdir(_snake_case ) if (p.endswith('.zip' ) or from_gh)]
for p in paths:
selected_warnings.update(extract_warnings_from_single_artifact(_snake_case , _snake_case ) )
return selected_warnings
if __name__ == "__main__":
def _snake_case ( _snake_case : int ) -> Dict:
'''simple docstring'''
return values.split(',' )
a = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''--workflow_run_id''', type=str, required=True, help='''A GitHub Actions workflow run id.''')
parser.add_argument(
'''--output_dir''',
type=str,
required=True,
help='''Where to store the downloaded artifacts and other result files.''',
)
parser.add_argument('''--token''', default=None, type=str, help='''A token that has actions:read permission.''')
# optional parameters
parser.add_argument(
'''--targets''',
default='''DeprecationWarning,UserWarning,FutureWarning''',
type=list_str,
help='''Comma-separated list of target warning(s) which we want to extract.''',
)
parser.add_argument(
'''--from_gh''',
action='''store_true''',
help='''If running from a GitHub action workflow and collecting warnings from its artifacts.''',
)
a = parser.parse_args()
a = args.from_gh
if from_gh:
# The artifacts have to be downloaded using `actions/download-artifact@v3`
pass
else:
os.makedirs(args.output_dir, exist_ok=True)
# get download links
a = get_artifacts_links(args.workflow_run_id, token=args.token)
with open(os.path.join(args.output_dir, '''artifacts.json'''), '''w''', encoding='''UTF-8''') as fp:
json.dump(artifacts, fp, ensure_ascii=False, indent=4)
# download artifacts
for idx, (name, url) in enumerate(artifacts.items()):
print(name)
print(url)
print('''=''' * 80)
download_artifact(name, url, args.output_dir, args.token)
# Be gentle to GitHub
time.sleep(1)
# extract warnings from artifacts
a = extract_warnings(args.output_dir, args.targets)
a = sorted(selected_warnings)
with open(os.path.join(args.output_dir, '''selected_warnings.json'''), '''w''', encoding='''UTF-8''') as fp:
json.dump(selected_warnings, fp, ensure_ascii=False, indent=4)
| 7
|
"""simple docstring"""
import math
def _snake_case ( _snake_case : float , _snake_case : float ) -> float:
'''simple docstring'''
if (
not isinstance(_snake_case , (int, float) )
or power_factor < -1
or power_factor > 1
):
raise ValueError('power_factor must be a valid float value between -1 and 1.' )
return apparent_power * power_factor
def _snake_case ( _snake_case : float , _snake_case : float ) -> float:
'''simple docstring'''
if (
not isinstance(_snake_case , (int, float) )
or power_factor < -1
or power_factor > 1
):
raise ValueError('power_factor must be a valid float value between -1 and 1.' )
return apparent_power * math.sqrt(1 - power_factor**2 )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 7
| 1
|
"""simple docstring"""
from copy import deepcopy
import torch
import torch.nn.functional as F
from torch.optim import AdamW
from torch.optim.lr_scheduler import LambdaLR
from torch.utils.data import DataLoader
from accelerate.accelerator import Accelerator
from accelerate.state import GradientState
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import DistributedType, is_torch_version, set_seed
def _lowerCAmelCase ( lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ):
'''simple docstring'''
for param, grad_param in zip(model_a.parameters() , model_b.parameters() ):
if not param.requires_grad:
continue
if not did_step:
# Grads should not be in sync
assert (
torch.allclose(param.grad , grad_param.grad ) is False
), F'''Gradients in sync when they should not be at iteration {iteration}:\nmodel_a grad ({param.grad}) == model_b grad ({grad_param.grad})'''
else:
# Grads should be in sync
assert (
torch.allclose(param.grad , grad_param.grad ) is True
), F'''Gradients not in sync when they should be at iteration {iteration}:\nmodel_a grad ({param.grad}) != model_b grad ({grad_param.grad})'''
def _lowerCAmelCase ( lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase=True ):
'''simple docstring'''
model.train()
UpperCAmelCase = model(lowerCAmelCase )
UpperCAmelCase = F.mse_loss(lowerCAmelCase , target.to(output.device ) )
if not do_backward:
loss /= accelerator.gradient_accumulation_steps
loss.backward()
else:
accelerator.backward(lowerCAmelCase )
def _lowerCAmelCase ( lowerCAmelCase , lowerCAmelCase=False ):
'''simple docstring'''
set_seed(42 )
UpperCAmelCase = RegressionModel()
UpperCAmelCase = deepcopy(lowerCAmelCase )
UpperCAmelCase = RegressionDataset(length=80 )
UpperCAmelCase = DataLoader(lowerCAmelCase , batch_size=16 )
model.to(accelerator.device )
if sched:
UpperCAmelCase = AdamW(params=model.parameters() , lr=1e-3 )
UpperCAmelCase = AdamW(params=ddp_model.parameters() , lr=1e-3 )
UpperCAmelCase = LambdaLR(lowerCAmelCase , lr_lambda=lambda lowerCAmelCase : epoch**0.65 )
UpperCAmelCase = LambdaLR(lowerCAmelCase , lr_lambda=lambda lowerCAmelCase : epoch**0.65 )
# Make a copy of `model`
if sched:
UpperCAmelCase = accelerator.prepare(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
else:
UpperCAmelCase = accelerator.prepare(lowerCAmelCase , lowerCAmelCase )
if sched:
return (model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched)
return model, ddp_model, dataloader
def _lowerCAmelCase ( lowerCAmelCase ):
'''simple docstring'''
# Test when on a single CPU or GPU that the context manager does nothing
UpperCAmelCase = get_training_setup(lowerCAmelCase )
# Use a single batch
UpperCAmelCase = next(iter(lowerCAmelCase ) ).values()
for iteration in range(3 ):
# Gather the distributed inputs and targs for the base model
UpperCAmelCase = accelerator.gather((ddp_input, ddp_target) )
UpperCAmelCase = input.to(accelerator.device ), target.to(accelerator.device )
# Perform our initial ground truth step in non "DDP"
step_model(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
# Do "gradient accumulation" (noop)
if iteration % 2 == 0:
# Accumulate grads locally
with accelerator.no_sync(lowerCAmelCase ):
step_model(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
else:
# Sync grads
step_model(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
# Since `no_sync` is a noop, `ddp_model` and `model` grads should always be in sync
check_model_parameters(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ):
if not param.requires_grad:
continue
assert torch.allclose(
param.grad , ddp_param.grad ), F'''Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})'''
# Shuffle ddp_input on each iteration
torch.manual_seed(1337 + iteration )
UpperCAmelCase = ddp_input[torch.randperm(len(lowerCAmelCase ) )]
def _lowerCAmelCase ( lowerCAmelCase ):
'''simple docstring'''
# Test on distributed setup that context manager behaves properly
UpperCAmelCase = get_training_setup(lowerCAmelCase )
# Use a single batch
UpperCAmelCase = next(iter(lowerCAmelCase ) ).values()
for iteration in range(3 ):
# Gather the distributed inputs and targs for the base model
UpperCAmelCase = accelerator.gather((ddp_input, ddp_target) )
UpperCAmelCase = input.to(accelerator.device ), target.to(accelerator.device )
# Perform our initial ground truth step in non "DDP"
step_model(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
# Do "gradient accumulation" (noop)
if iteration % 2 == 0:
# Accumulate grads locally
with accelerator.no_sync(lowerCAmelCase ):
step_model(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
else:
# Sync grads
step_model(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
# DDP model and model should only be in sync when not (iteration % 2 == 0)
for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ):
if not param.requires_grad:
continue
if iteration % 2 == 0:
# Grads should not be in sync
assert (
torch.allclose(param.grad , ddp_param.grad ) is False
), F'''Gradients in sync when they should not be:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})'''
else:
# Grads should be in sync
assert (
torch.allclose(param.grad , ddp_param.grad ) is True
), F'''Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})'''
# Shuffle ddp_input on each iteration
torch.manual_seed(1337 + iteration )
UpperCAmelCase = ddp_input[torch.randperm(len(lowerCAmelCase ) )]
def _lowerCAmelCase ( lowerCAmelCase=False , lowerCAmelCase=False ):
'''simple docstring'''
UpperCAmelCase = Accelerator(
split_batches=lowerCAmelCase , dispatch_batches=lowerCAmelCase , gradient_accumulation_steps=2 )
# Test that context manager behaves properly
UpperCAmelCase = get_training_setup(lowerCAmelCase )
for iteration, batch in enumerate(lowerCAmelCase ):
UpperCAmelCase = batch.values()
# Gather the distributed inputs and targs for the base model
UpperCAmelCase = accelerator.gather((ddp_input, ddp_target) )
UpperCAmelCase = input.to(accelerator.device ), target.to(accelerator.device )
# Perform our initial ground truth step in non "DDP"
step_model(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
# Do "gradient accumulation" (noop)
with accelerator.accumulate(lowerCAmelCase ):
step_model(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
# DDP model and model should only be in sync when not (iteration % 2 == 0)
for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ):
if not param.requires_grad:
continue
if ((iteration + 1) % 2 == 0) or (iteration == len(lowerCAmelCase ) - 1):
# Grads should be in sync
assert (
torch.allclose(param.grad , ddp_param.grad ) is True
), F'''Gradients not in sync when they should be at iteration {iteration}:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})'''
else:
# Grads should not be in sync
assert (
torch.allclose(param.grad , ddp_param.grad ) is False
), F'''Gradients in sync when they should not be at iteration {iteration}:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})'''
# Shuffle ddp_input on each iteration
torch.manual_seed(1337 + iteration )
UpperCAmelCase = ddp_input[torch.randperm(len(lowerCAmelCase ) )]
GradientState._reset_state()
def _lowerCAmelCase ( lowerCAmelCase=False , lowerCAmelCase=False ):
'''simple docstring'''
UpperCAmelCase = Accelerator(
split_batches=lowerCAmelCase , dispatch_batches=lowerCAmelCase , gradient_accumulation_steps=2 )
# Test that context manager behaves properly
UpperCAmelCase = get_training_setup(lowerCAmelCase , lowerCAmelCase )
for iteration, batch in enumerate(lowerCAmelCase ):
UpperCAmelCase = batch.values()
# Gather the distributed inputs and targs for the base model
UpperCAmelCase = accelerator.gather((ddp_input, ddp_target) )
UpperCAmelCase = input.to(accelerator.device ), target.to(accelerator.device )
# Perform our initial ground truth step in non "DDP"
model.train()
ddp_model.train()
step_model(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
opt.step()
if ((iteration + 1) % 2 == 0) or ((iteration + 1) == len(lowerCAmelCase )):
if split_batches:
sched.step()
else:
for _ in range(accelerator.num_processes ):
sched.step()
opt.zero_grad()
# Perform gradient accumulation under wrapper
with accelerator.accumulate(lowerCAmelCase ):
step_model(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
ddp_opt.step()
ddp_sched.step()
ddp_opt.zero_grad()
# Learning rates should be the same
assert (
opt.param_groups[0]["lr"] == ddp_opt.param_groups[0]["lr"]
), F'''Learning rates found in each optimizer did not align\nopt: {opt.param_groups[0]['lr']}\nDDP opt: {ddp_opt.param_groups[0]['lr']}\n'''
UpperCAmelCase = (((iteration + 1) % 2) == 0) or ((iteration + 1) == len(lowerCAmelCase ))
if accelerator.num_processes > 1:
check_model_parameters(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
# Shuffle ddp_input on each iteration
torch.manual_seed(1337 + iteration )
GradientState._reset_state()
def _lowerCAmelCase ( ):
'''simple docstring'''
UpperCAmelCase = Accelerator()
UpperCAmelCase = RegressionDataset(length=80 )
UpperCAmelCase = DataLoader(lowerCAmelCase , batch_size=16 )
UpperCAmelCase = RegressionDataset(length=96 )
UpperCAmelCase = DataLoader(lowerCAmelCase , batch_size=16 )
UpperCAmelCase = accelerator.prepare(lowerCAmelCase , lowerCAmelCase )
assert accelerator.gradient_state.active_dataloader is None
for iteration, _ in enumerate(lowerCAmelCase ):
assert id(accelerator.gradient_state.active_dataloader ) == id(lowerCAmelCase )
if iteration < len(lowerCAmelCase ) - 1:
assert not accelerator.gradient_state.end_of_dataloader
if iteration == 1:
for batch_num, _ in enumerate(lowerCAmelCase ):
assert id(accelerator.gradient_state.active_dataloader ) == id(lowerCAmelCase )
if batch_num < len(lowerCAmelCase ) - 1:
assert not accelerator.gradient_state.end_of_dataloader
else:
assert accelerator.gradient_state.end_of_dataloader
else:
assert accelerator.gradient_state.end_of_dataloader
assert accelerator.gradient_state.active_dataloader is None
def _lowerCAmelCase ( ):
'''simple docstring'''
UpperCAmelCase = Accelerator()
UpperCAmelCase = accelerator.state
if state.local_process_index == 0:
print("""**Test `accumulate` gradient accumulation with dataloader break**""" )
test_dataloader_break()
if state.distributed_type == DistributedType.NO:
if state.local_process_index == 0:
print("""**Test NOOP `no_sync` context manager**""" )
test_noop_sync(lowerCAmelCase )
if state.distributed_type in (DistributedType.MULTI_GPU, DistributedType.MULTI_CPU):
if state.local_process_index == 0:
print("""**Test Distributed `no_sync` context manager**""" )
test_distributed_sync(lowerCAmelCase )
if state.distributed_type == DistributedType.MULTI_GPU:
for split_batch in [True, False]:
for dispatch_batches in [True, False]:
if state.local_process_index == 0:
print(
"""**Test `accumulate` gradient accumulation, """ , F'''`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**''' , )
test_gradient_accumulation(lowerCAmelCase , lowerCAmelCase )
# Currently will break on torch 2.0 +, need to investigate why
if is_torch_version("""<""" , """2.0""" ) or state.distributed_type == DistributedType.NO:
if state.local_process_index == 0:
print(
"""**Test `accumulate` gradient accumulation with optimizer and scheduler, """ , """`split_batches=False`, `dispatch_batches=False`**""" , )
test_gradient_accumulation_with_opt_and_scheduler()
if state.distributed_type == DistributedType.MULTI_GPU:
for split_batch in [True, False]:
for dispatch_batches in [True, False]:
if not split_batch and not dispatch_batches:
continue
if state.local_process_index == 0:
print(
"""**Test `accumulate` gradient accumulation with optimizer and scheduler, """ , F'''`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**''' , )
test_gradient_accumulation_with_opt_and_scheduler(lowerCAmelCase , lowerCAmelCase )
def _lowerCAmelCase ( lowerCAmelCase ):
'''simple docstring'''
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
| 714
|
"""simple docstring"""
import argparse
import json
import os
import numpy as np
import PIL
import requests
import tensorflow.keras.applications.efficientnet as efficientnet
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from tensorflow.keras.preprocessing import image
from transformers import (
EfficientNetConfig,
EfficientNetForImageClassification,
EfficientNetImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
lowerCAmelCase_ : List[Any] = logging.get_logger(__name__)
lowerCAmelCase_ : Tuple = {
'''b0''': efficientnet.EfficientNetBa,
'''b1''': efficientnet.EfficientNetBa,
'''b2''': efficientnet.EfficientNetBa,
'''b3''': efficientnet.EfficientNetBa,
'''b4''': efficientnet.EfficientNetBa,
'''b5''': efficientnet.EfficientNetBa,
'''b6''': efficientnet.EfficientNetBa,
'''b7''': efficientnet.EfficientNetBa,
}
lowerCAmelCase_ : int = {
'''b0''': {
'''hidden_dim''': 1_2_8_0,
'''width_coef''': 1.0,
'''depth_coef''': 1.0,
'''image_size''': 2_2_4,
'''dropout_rate''': 0.2,
'''dw_padding''': [],
},
'''b1''': {
'''hidden_dim''': 1_2_8_0,
'''width_coef''': 1.0,
'''depth_coef''': 1.1,
'''image_size''': 2_4_0,
'''dropout_rate''': 0.2,
'''dw_padding''': [1_6],
},
'''b2''': {
'''hidden_dim''': 1_4_0_8,
'''width_coef''': 1.1,
'''depth_coef''': 1.2,
'''image_size''': 2_6_0,
'''dropout_rate''': 0.3,
'''dw_padding''': [5, 8, 1_6],
},
'''b3''': {
'''hidden_dim''': 1_5_3_6,
'''width_coef''': 1.2,
'''depth_coef''': 1.4,
'''image_size''': 3_0_0,
'''dropout_rate''': 0.3,
'''dw_padding''': [5, 1_8],
},
'''b4''': {
'''hidden_dim''': 1_7_9_2,
'''width_coef''': 1.4,
'''depth_coef''': 1.8,
'''image_size''': 3_8_0,
'''dropout_rate''': 0.4,
'''dw_padding''': [6],
},
'''b5''': {
'''hidden_dim''': 2_0_4_8,
'''width_coef''': 1.6,
'''depth_coef''': 2.2,
'''image_size''': 4_5_6,
'''dropout_rate''': 0.4,
'''dw_padding''': [1_3, 2_7],
},
'''b6''': {
'''hidden_dim''': 2_3_0_4,
'''width_coef''': 1.8,
'''depth_coef''': 2.6,
'''image_size''': 5_2_8,
'''dropout_rate''': 0.5,
'''dw_padding''': [3_1],
},
'''b7''': {
'''hidden_dim''': 2_5_6_0,
'''width_coef''': 2.0,
'''depth_coef''': 3.1,
'''image_size''': 6_0_0,
'''dropout_rate''': 0.5,
'''dw_padding''': [1_8],
},
}
def _lowerCAmelCase ( lowerCAmelCase ):
'''simple docstring'''
UpperCAmelCase = EfficientNetConfig()
UpperCAmelCase = CONFIG_MAP[model_name]["""hidden_dim"""]
UpperCAmelCase = CONFIG_MAP[model_name]["""width_coef"""]
UpperCAmelCase = CONFIG_MAP[model_name]["""depth_coef"""]
UpperCAmelCase = CONFIG_MAP[model_name]["""image_size"""]
UpperCAmelCase = CONFIG_MAP[model_name]["""dropout_rate"""]
UpperCAmelCase = CONFIG_MAP[model_name]["""dw_padding"""]
UpperCAmelCase = """huggingface/label-files"""
UpperCAmelCase = """imagenet-1k-id2label.json"""
UpperCAmelCase = 1000
UpperCAmelCase = json.load(open(hf_hub_download(lowerCAmelCase , lowerCAmelCase , repo_type="""dataset""" ) , """r""" ) )
UpperCAmelCase = {int(lowerCAmelCase ): v for k, v in idalabel.items()}
UpperCAmelCase = idalabel
UpperCAmelCase = {v: k for k, v in idalabel.items()}
return config
def _lowerCAmelCase ( ):
'''simple docstring'''
UpperCAmelCase = """http://images.cocodataset.org/val2017/000000039769.jpg"""
UpperCAmelCase = Image.open(requests.get(lowerCAmelCase , stream=lowerCAmelCase ).raw )
return im
def _lowerCAmelCase ( lowerCAmelCase ):
'''simple docstring'''
UpperCAmelCase = CONFIG_MAP[model_name]["""image_size"""]
UpperCAmelCase = EfficientNetImageProcessor(
size={"""height""": size, """width""": size} , image_mean=[0.4_85, 0.4_56, 0.4_06] , image_std=[0.47_85_39_44, 0.4_73_28_64, 0.47_43_41_63] , do_center_crop=lowerCAmelCase , )
return preprocessor
def _lowerCAmelCase ( lowerCAmelCase ):
'''simple docstring'''
UpperCAmelCase = [v.split("""_""" )[0].split("""block""" )[1] for v in original_param_names if v.startswith("""block""" )]
UpperCAmelCase = sorted(set(lowerCAmelCase ) )
UpperCAmelCase = len(lowerCAmelCase )
UpperCAmelCase = {b: str(lowerCAmelCase ) for b, i in zip(lowerCAmelCase , range(lowerCAmelCase ) )}
UpperCAmelCase = []
rename_keys.append(("""stem_conv/kernel:0""", """embeddings.convolution.weight""") )
rename_keys.append(("""stem_bn/gamma:0""", """embeddings.batchnorm.weight""") )
rename_keys.append(("""stem_bn/beta:0""", """embeddings.batchnorm.bias""") )
rename_keys.append(("""stem_bn/moving_mean:0""", """embeddings.batchnorm.running_mean""") )
rename_keys.append(("""stem_bn/moving_variance:0""", """embeddings.batchnorm.running_var""") )
for b in block_names:
UpperCAmelCase = block_name_mapping[b]
rename_keys.append((F'''block{b}_expand_conv/kernel:0''', F'''encoder.blocks.{hf_b}.expansion.expand_conv.weight''') )
rename_keys.append((F'''block{b}_expand_bn/gamma:0''', F'''encoder.blocks.{hf_b}.expansion.expand_bn.weight''') )
rename_keys.append((F'''block{b}_expand_bn/beta:0''', F'''encoder.blocks.{hf_b}.expansion.expand_bn.bias''') )
rename_keys.append(
(F'''block{b}_expand_bn/moving_mean:0''', F'''encoder.blocks.{hf_b}.expansion.expand_bn.running_mean''') )
rename_keys.append(
(F'''block{b}_expand_bn/moving_variance:0''', F'''encoder.blocks.{hf_b}.expansion.expand_bn.running_var''') )
rename_keys.append(
(F'''block{b}_dwconv/depthwise_kernel:0''', F'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_conv.weight''') )
rename_keys.append((F'''block{b}_bn/gamma:0''', F'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.weight''') )
rename_keys.append((F'''block{b}_bn/beta:0''', F'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.bias''') )
rename_keys.append(
(F'''block{b}_bn/moving_mean:0''', F'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_mean''') )
rename_keys.append(
(F'''block{b}_bn/moving_variance:0''', F'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_var''') )
rename_keys.append((F'''block{b}_se_reduce/kernel:0''', F'''encoder.blocks.{hf_b}.squeeze_excite.reduce.weight''') )
rename_keys.append((F'''block{b}_se_reduce/bias:0''', F'''encoder.blocks.{hf_b}.squeeze_excite.reduce.bias''') )
rename_keys.append((F'''block{b}_se_expand/kernel:0''', F'''encoder.blocks.{hf_b}.squeeze_excite.expand.weight''') )
rename_keys.append((F'''block{b}_se_expand/bias:0''', F'''encoder.blocks.{hf_b}.squeeze_excite.expand.bias''') )
rename_keys.append(
(F'''block{b}_project_conv/kernel:0''', F'''encoder.blocks.{hf_b}.projection.project_conv.weight''') )
rename_keys.append((F'''block{b}_project_bn/gamma:0''', F'''encoder.blocks.{hf_b}.projection.project_bn.weight''') )
rename_keys.append((F'''block{b}_project_bn/beta:0''', F'''encoder.blocks.{hf_b}.projection.project_bn.bias''') )
rename_keys.append(
(F'''block{b}_project_bn/moving_mean:0''', F'''encoder.blocks.{hf_b}.projection.project_bn.running_mean''') )
rename_keys.append(
(F'''block{b}_project_bn/moving_variance:0''', F'''encoder.blocks.{hf_b}.projection.project_bn.running_var''') )
rename_keys.append(("""top_conv/kernel:0""", """encoder.top_conv.weight""") )
rename_keys.append(("""top_bn/gamma:0""", """encoder.top_bn.weight""") )
rename_keys.append(("""top_bn/beta:0""", """encoder.top_bn.bias""") )
rename_keys.append(("""top_bn/moving_mean:0""", """encoder.top_bn.running_mean""") )
rename_keys.append(("""top_bn/moving_variance:0""", """encoder.top_bn.running_var""") )
UpperCAmelCase = {}
for item in rename_keys:
if item[0] in original_param_names:
UpperCAmelCase = """efficientnet.""" + item[1]
UpperCAmelCase = """classifier.weight"""
UpperCAmelCase = """classifier.bias"""
return key_mapping
def _lowerCAmelCase ( lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ):
'''simple docstring'''
for key, value in tf_params.items():
if "normalization" in key:
continue
UpperCAmelCase = key_mapping[key]
if "_conv" in key and "kernel" in key:
UpperCAmelCase = torch.from_numpy(lowerCAmelCase ).permute(3 , 2 , 0 , 1 )
elif "depthwise_kernel" in key:
UpperCAmelCase = torch.from_numpy(lowerCAmelCase ).permute(2 , 3 , 0 , 1 )
elif "kernel" in key:
UpperCAmelCase = torch.from_numpy(np.transpose(lowerCAmelCase ) )
else:
UpperCAmelCase = torch.from_numpy(lowerCAmelCase )
# Replace HF parameters with original TF model parameters
assert hf_params[hf_key].shape == new_hf_value.shape
hf_params[hf_key].copy_(lowerCAmelCase )
@torch.no_grad()
def _lowerCAmelCase ( lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ):
'''simple docstring'''
UpperCAmelCase = model_classes[model_name](
include_top=lowerCAmelCase , weights="""imagenet""" , input_tensor=lowerCAmelCase , input_shape=lowerCAmelCase , pooling=lowerCAmelCase , classes=1000 , classifier_activation="""softmax""" , )
UpperCAmelCase = original_model.trainable_variables
UpperCAmelCase = original_model.non_trainable_variables
UpperCAmelCase = {param.name: param.numpy() for param in tf_params}
for param in tf_non_train_params:
UpperCAmelCase = param.numpy()
UpperCAmelCase = list(tf_params.keys() )
# Load HuggingFace model
UpperCAmelCase = get_efficientnet_config(lowerCAmelCase )
UpperCAmelCase = EfficientNetForImageClassification(lowerCAmelCase ).eval()
UpperCAmelCase = hf_model.state_dict()
# Create src-to-dst parameter name mapping dictionary
print("""Converting parameters...""" )
UpperCAmelCase = rename_keys(lowerCAmelCase )
replace_params(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
# Initialize preprocessor and preprocess input image
UpperCAmelCase = convert_image_processor(lowerCAmelCase )
UpperCAmelCase = preprocessor(images=prepare_img() , return_tensors="""pt""" )
# HF model inference
hf_model.eval()
with torch.no_grad():
UpperCAmelCase = hf_model(**lowerCAmelCase )
UpperCAmelCase = outputs.logits.detach().numpy()
# Original model inference
UpperCAmelCase = False
UpperCAmelCase = CONFIG_MAP[model_name]["""image_size"""]
UpperCAmelCase = prepare_img().resize((image_size, image_size) , resample=PIL.Image.NEAREST )
UpperCAmelCase = image.img_to_array(lowerCAmelCase )
UpperCAmelCase = np.expand_dims(lowerCAmelCase , axis=0 )
UpperCAmelCase = original_model.predict(lowerCAmelCase )
# Check whether original and HF model outputs match -> np.allclose
assert np.allclose(lowerCAmelCase , lowerCAmelCase , atol=1e-3 ), "The predicted logits are not the same."
print("""Model outputs match!""" )
if save_model:
# Create folder to save model
if not os.path.isdir(lowerCAmelCase ):
os.mkdir(lowerCAmelCase )
# Save converted model and image processor
hf_model.save_pretrained(lowerCAmelCase )
preprocessor.save_pretrained(lowerCAmelCase )
if push_to_hub:
# Push model and image processor to hub
print(F'''Pushing converted {model_name} to the hub...''' )
UpperCAmelCase = F'''efficientnet-{model_name}'''
preprocessor.push_to_hub(lowerCAmelCase )
hf_model.push_to_hub(lowerCAmelCase )
if __name__ == "__main__":
lowerCAmelCase_ : int = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''b0''',
type=str,
help='''Version name of the EfficientNet model you want to convert, select from [b0, b1, b2, b3, b4, b5, b6, b7].''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default='''hf_model''',
type=str,
help='''Path to the output PyTorch model directory.''',
)
parser.add_argument('''--save_model''', action='''store_true''', help='''Save model to local''')
parser.add_argument('''--push_to_hub''', action='''store_true''', help='''Push model and image processor to the hub''')
lowerCAmelCase_ : Optional[Any] = parser.parse_args()
convert_efficientnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.save_model, args.push_to_hub)
| 378
| 0
|
from functools import lru_cache
@lru_cache
def SCREAMING_SNAKE_CASE__ ( snake_case_ ) -> int:
if num < 0:
raise ValueError('''Number should not be negative.''' )
return 1 if num in (0, 1) else num * factorial(num - 1 )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 416
|
def SCREAMING_SNAKE_CASE__ ( snake_case_ = 1_0_0_0 ) -> int:
A__ : Any =3
A__ : Optional[Any] =0
while a < n:
if a % 3 == 0 or a % 5 == 0:
result += a
elif a % 1_5 == 0:
result -= a
a += 1
return result
if __name__ == "__main__":
print(F"{solution() = }")
| 416
| 1
|
'''simple docstring'''
from typing import Callable, Optional
from .. import Features
from ..packaged_modules.generator.generator import Generator
from .abc import AbstractDatasetInputStream
class lowercase__ ( __lowerCAmelCase ):
def __init__( self , __UpperCAmelCase , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = False , __UpperCAmelCase = False , __UpperCAmelCase = None , __UpperCAmelCase = None , **__UpperCAmelCase , )-> Tuple:
'''simple docstring'''
super().__init__(
features=lowerCamelCase__ , cache_dir=lowerCamelCase__ , keep_in_memory=lowerCamelCase__ , streaming=lowerCamelCase__ , num_proc=lowerCamelCase__ , **lowerCamelCase__ , )
lowerCAmelCase__ = Generator(
cache_dir=lowerCamelCase__ , features=lowerCamelCase__ , generator=lowerCamelCase__ , gen_kwargs=lowerCamelCase__ , **lowerCamelCase__ , )
def UpperCAmelCase ( self )-> List[Any]:
'''simple docstring'''
if self.streaming:
lowerCAmelCase__ = self.builder.as_streaming_dataset(split="train" )
# Build regular (map-style) dataset
else:
lowerCAmelCase__ = None
lowerCAmelCase__ = None
lowerCAmelCase__ = None
lowerCAmelCase__ = None
self.builder.download_and_prepare(
download_config=lowerCamelCase__ , download_mode=lowerCamelCase__ , verification_mode=lowerCamelCase__ , base_path=lowerCamelCase__ , num_proc=self.num_proc , )
lowerCAmelCase__ = self.builder.as_dataset(
split="train" , verification_mode=lowerCamelCase__ , in_memory=self.keep_in_memory )
return dataset
| 718
|
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert import BertTokenizer
a_ = logging.get_logger(__name__)
a_ = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''}
a_ = {
'''vocab_file''': {
'''facebook/dpr-ctx_encoder-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt'''
),
'''facebook/dpr-ctx_encoder-multiset-base''': (
'''https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''facebook/dpr-ctx_encoder-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json'''
),
'''facebook/dpr-ctx_encoder-multiset-base''': (
'''https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json'''
),
},
}
a_ = {
'''vocab_file''': {
'''facebook/dpr-question_encoder-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt'''
),
'''facebook/dpr-question_encoder-multiset-base''': (
'''https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''facebook/dpr-question_encoder-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json'''
),
'''facebook/dpr-question_encoder-multiset-base''': (
'''https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json'''
),
},
}
a_ = {
'''vocab_file''': {
'''facebook/dpr-reader-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt'''
),
'''facebook/dpr-reader-multiset-base''': (
'''https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''facebook/dpr-reader-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json'''
),
'''facebook/dpr-reader-multiset-base''': (
'''https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json'''
),
},
}
a_ = {
'''facebook/dpr-ctx_encoder-single-nq-base''': 512,
'''facebook/dpr-ctx_encoder-multiset-base''': 512,
}
a_ = {
'''facebook/dpr-question_encoder-single-nq-base''': 512,
'''facebook/dpr-question_encoder-multiset-base''': 512,
}
a_ = {
'''facebook/dpr-reader-single-nq-base''': 512,
'''facebook/dpr-reader-multiset-base''': 512,
}
a_ = {
'''facebook/dpr-ctx_encoder-single-nq-base''': {'''do_lower_case''': True},
'''facebook/dpr-ctx_encoder-multiset-base''': {'''do_lower_case''': True},
}
a_ = {
'''facebook/dpr-question_encoder-single-nq-base''': {'''do_lower_case''': True},
'''facebook/dpr-question_encoder-multiset-base''': {'''do_lower_case''': True},
}
a_ = {
'''facebook/dpr-reader-single-nq-base''': {'''do_lower_case''': True},
'''facebook/dpr-reader-multiset-base''': {'''do_lower_case''': True},
}
class lowercase__ ( _UpperCAmelCase ):
a_ =VOCAB_FILES_NAMES
a_ =CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
a_ =CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a_ =CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION
class lowercase__ ( _UpperCAmelCase ):
a_ =VOCAB_FILES_NAMES
a_ =QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
a_ =QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a_ =QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
a_ = collections.namedtuple(
'''DPRSpanPrediction''', ['''span_score''', '''relevance_score''', '''doc_id''', '''start_index''', '''end_index''', '''text''']
)
a_ = collections.namedtuple('''DPRReaderOutput''', ['''start_logits''', '''end_logits''', '''relevance_logits'''])
a_ = r'''
Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.
It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),
using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`
with the format:
```
[CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>
```
Args:
questions (`str` or `List[str]`):
The questions to be encoded. You can specify one question for many passages. In this case, the question
will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in
`titles` or `texts`.
titles (`str` or `List[str]`):
The passages titles to be encoded. This can be a string or a list of strings if there are several passages.
texts (`str` or `List[str]`):
The passages texts to be encoded. This can be a string or a list of strings if there are several passages.
padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):
Activates and controls padding. Accepts the following values:
- `True` or `\'longest\'`: Pad to the longest sequence in the batch (or no padding if only a single sequence
if provided).
- `\'max_length\'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided.
- `False` or `\'do_not_pad\'` (default): No padding (i.e., can output a batch with sequences of different
lengths).
truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):
Activates and controls truncation. Accepts the following values:
- `True` or `\'longest_first\'`: Truncate to a maximum length specified with the argument `max_length` or to
the maximum acceptable input length for the model if that argument is not provided. This will truncate
token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch
of pairs) is provided.
- `\'only_first\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided. This will only truncate the first
sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
- `\'only_second\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided. This will only truncate the
second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
- `False` or `\'do_not_truncate\'` (default): No truncation (i.e., can output batch with sequence lengths
greater than the model maximum admissible input size).
max_length (`int`, *optional*):
Controls the maximum length to use by one of the truncation/padding parameters.
If left unset or set to `None`, this will use the predefined model maximum length if a maximum length
is required by one of the truncation/padding parameters. If the model has no specific maximum input
length (like XLNet) truncation/padding to a maximum length will be deactivated.
return_tensors (`str` or [`~utils.TensorType`], *optional*):
If set, will return tensors instead of list of python integers. Acceptable values are:
- `\'tf\'`: Return TensorFlow `tf.constant` objects.
- `\'pt\'`: Return PyTorch `torch.Tensor` objects.
- `\'np\'`: Return Numpy `np.ndarray` objects.
return_attention_mask (`bool`, *optional*):
Whether or not to return the attention mask. If not set, will return the attention mask according to the
specific tokenizer\'s default, defined by the `return_outputs` attribute.
[What are attention masks?](../glossary#attention-mask)
Returns:
`Dict[str, List[List[int]]]`: A dictionary with the following keys:
- `input_ids`: List of token ids to be fed to a model.
- `attention_mask`: List of indices specifying which tokens should be attended to by the model.
'''
@add_start_docstrings(_UpperCAmelCase )
class lowercase__ :
def __call__( self , __UpperCAmelCase , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = False , __UpperCAmelCase = False , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , **__UpperCAmelCase , )-> BatchEncoding:
'''simple docstring'''
if titles is None and texts is None:
return super().__call__(
__UpperCAmelCase , padding=__UpperCAmelCase , truncation=__UpperCAmelCase , max_length=__UpperCAmelCase , return_tensors=__UpperCAmelCase , return_attention_mask=__UpperCAmelCase , **__UpperCAmelCase , )
elif titles is None or texts is None:
lowerCAmelCase__ = titles if texts is None else texts
return super().__call__(
__UpperCAmelCase , __UpperCAmelCase , padding=__UpperCAmelCase , truncation=__UpperCAmelCase , max_length=__UpperCAmelCase , return_tensors=__UpperCAmelCase , return_attention_mask=__UpperCAmelCase , **__UpperCAmelCase , )
lowerCAmelCase__ = titles if not isinstance(__UpperCAmelCase , __UpperCAmelCase ) else [titles]
lowerCAmelCase__ = texts if not isinstance(__UpperCAmelCase , __UpperCAmelCase ) else [texts]
lowerCAmelCase__ = len(__UpperCAmelCase )
lowerCAmelCase__ = questions if not isinstance(__UpperCAmelCase , __UpperCAmelCase ) else [questions] * n_passages
if len(__UpperCAmelCase ) != len(__UpperCAmelCase ):
raise ValueError(
F"There should be as many titles than texts but got {len(__UpperCAmelCase )} titles and {len(__UpperCAmelCase )} texts." )
lowerCAmelCase__ = super().__call__(__UpperCAmelCase , __UpperCAmelCase , padding=__UpperCAmelCase , truncation=__UpperCAmelCase )["input_ids"]
lowerCAmelCase__ = super().__call__(__UpperCAmelCase , add_special_tokens=__UpperCAmelCase , padding=__UpperCAmelCase , truncation=__UpperCAmelCase )["input_ids"]
lowerCAmelCase__ = {
"input_ids": [
(encoded_question_and_title + encoded_text)[:max_length]
if max_length is not None and truncation
else encoded_question_and_title + encoded_text
for encoded_question_and_title, encoded_text in zip(__UpperCAmelCase , __UpperCAmelCase )
]
}
if return_attention_mask is not False:
lowerCAmelCase__ = []
for input_ids in encoded_inputs["input_ids"]:
attention_mask.append([int(input_id != self.pad_token_id ) for input_id in input_ids] )
lowerCAmelCase__ = attention_mask
return self.pad(__UpperCAmelCase , padding=__UpperCAmelCase , max_length=__UpperCAmelCase , return_tensors=__UpperCAmelCase )
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = 16 , __UpperCAmelCase = 64 , __UpperCAmelCase = 4 , )-> List[DPRSpanPrediction]:
'''simple docstring'''
lowerCAmelCase__ = reader_input["input_ids"]
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = reader_output[:3]
lowerCAmelCase__ = len(__UpperCAmelCase )
lowerCAmelCase__ = sorted(range(__UpperCAmelCase ) , reverse=__UpperCAmelCase , key=relevance_logits.__getitem__ )
lowerCAmelCase__ = []
for doc_id in sorted_docs:
lowerCAmelCase__ = list(input_ids[doc_id] )
# assuming question & title information is at the beginning of the sequence
lowerCAmelCase__ = sequence_ids.index(self.sep_token_id , 2 ) + 1 # second sep id
if sequence_ids[-1] == self.pad_token_id:
lowerCAmelCase__ = sequence_ids.index(self.pad_token_id )
else:
lowerCAmelCase__ = len(__UpperCAmelCase )
lowerCAmelCase__ = self._get_best_spans(
start_logits=start_logits[doc_id][passage_offset:sequence_len] , end_logits=end_logits[doc_id][passage_offset:sequence_len] , max_answer_length=__UpperCAmelCase , top_spans=__UpperCAmelCase , )
for start_index, end_index in best_spans:
start_index += passage_offset
end_index += passage_offset
nbest_spans_predictions.append(
DPRSpanPrediction(
span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index] , relevance_score=relevance_logits[doc_id] , doc_id=__UpperCAmelCase , start_index=__UpperCAmelCase , end_index=__UpperCAmelCase , text=self.decode(sequence_ids[start_index : end_index + 1] ) , ) )
if len(__UpperCAmelCase ) >= num_spans:
break
return nbest_spans_predictions[:num_spans]
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , )-> List[DPRSpanPrediction]:
'''simple docstring'''
lowerCAmelCase__ = []
for start_index, start_score in enumerate(__UpperCAmelCase ):
for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length] ):
scores.append(((start_index, start_index + answer_length), start_score + end_score) )
lowerCAmelCase__ = sorted(__UpperCAmelCase , key=lambda __UpperCAmelCase : x[1] , reverse=__UpperCAmelCase )
lowerCAmelCase__ = []
for (start_index, end_index), score in scores:
if start_index > end_index:
raise ValueError(F"Wrong span indices: [{start_index}:{end_index}]" )
lowerCAmelCase__ = end_index - start_index + 1
if length > max_answer_length:
raise ValueError(F"Span is too long: {length} > {max_answer_length}" )
if any(
start_index <= prev_start_index <= prev_end_index <= end_index
or prev_start_index <= start_index <= end_index <= prev_end_index
for (prev_start_index, prev_end_index) in chosen_span_intervals ):
continue
chosen_span_intervals.append((start_index, end_index) )
if len(__UpperCAmelCase ) == top_spans:
break
return chosen_span_intervals
@add_end_docstrings(_UpperCAmelCase )
class lowercase__ ( _UpperCAmelCase, _UpperCAmelCase ):
a_ =VOCAB_FILES_NAMES
a_ =READER_PRETRAINED_VOCAB_FILES_MAP
a_ =READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a_ =READER_PRETRAINED_INIT_CONFIGURATION
a_ =["""input_ids""", """attention_mask"""]
| 115
| 0
|
"""simple docstring"""
from collections import defaultdict
from graphs.minimum_spanning_tree_prims import prisms_algorithm as mst
def lowercase ( ):
'''simple docstring'''
_UpperCAmelCase , _UpperCAmelCase = 9, 14 # noqa: F841
_UpperCAmelCase = [
[0, 1, 4],
[0, 7, 8],
[1, 2, 8],
[7, 8, 7],
[7, 6, 1],
[2, 8, 2],
[8, 6, 6],
[2, 3, 7],
[2, 5, 4],
[6, 5, 2],
[3, 5, 14],
[3, 4, 9],
[5, 4, 10],
[1, 7, 11],
]
_UpperCAmelCase = defaultdict(_SCREAMING_SNAKE_CASE )
for nodea, nodea, cost in edges:
adjancency[nodea].append([nodea, cost] )
adjancency[nodea].append([nodea, cost] )
_UpperCAmelCase = mst(_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = [
[7, 6, 1],
[2, 8, 2],
[6, 5, 2],
[0, 1, 4],
[2, 5, 4],
[2, 3, 7],
[0, 7, 8],
[3, 4, 9],
]
for answer in expected:
_UpperCAmelCase = tuple(answer[:2] )
_UpperCAmelCase = tuple(edge[::-1] )
assert edge in result or reverse in result
| 602
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__A : List[Any] = logging.get_logger(__name__)
__A : Optional[Any] = {
"google/vivit-b-16x2-kinetics400": (
"https://huggingface.co/google/vivit-b-16x2-kinetics400/resolve/main/config.json"
),
# See all Vivit models at https://huggingface.co/models?filter=vivit
}
class _a ( lowerCAmelCase):
"""simple docstring"""
UpperCamelCase__ = """vivit"""
def __init__( self : int , __UpperCamelCase : Union[str, Any]=2_2_4 , __UpperCamelCase : Any=3_2 , __UpperCamelCase : str=[2, 1_6, 1_6] , __UpperCamelCase : Union[str, Any]=3 , __UpperCamelCase : int=7_6_8 , __UpperCamelCase : List[str]=1_2 , __UpperCamelCase : str=1_2 , __UpperCamelCase : Optional[Any]=3_0_7_2 , __UpperCamelCase : Optional[Any]="gelu_fast" , __UpperCamelCase : Union[str, Any]=0.0 , __UpperCamelCase : List[str]=0.0 , __UpperCamelCase : int=0.0_2 , __UpperCamelCase : List[str]=1e-06 , __UpperCamelCase : Union[str, Any]=True , **__UpperCamelCase : Any , )->Optional[int]:
_UpperCAmelCase = hidden_size
_UpperCAmelCase = num_hidden_layers
_UpperCAmelCase = num_attention_heads
_UpperCAmelCase = intermediate_size
_UpperCAmelCase = hidden_act
_UpperCAmelCase = hidden_dropout_prob
_UpperCAmelCase = attention_probs_dropout_prob
_UpperCAmelCase = initializer_range
_UpperCAmelCase = layer_norm_eps
_UpperCAmelCase = image_size
_UpperCAmelCase = num_frames
_UpperCAmelCase = tubelet_size
_UpperCAmelCase = num_channels
_UpperCAmelCase = qkv_bias
super().__init__(**__UpperCamelCase )
| 602
| 1
|
'''simple docstring'''
import argparse
import json
import os
import re
import torch
from transformers import BloomConfig, BloomModel
from transformers.file_utils import CONFIG_NAME, WEIGHTS_NAME
from transformers.utils import logging
logging.set_verbosity_info()
_lowercase : Tuple = [
"word_embeddings_layernorm.weight",
"word_embeddings_layernorm.bias",
"input_layernorm.weight",
"input_layernorm.bias",
"post_attention_layernorm.weight",
"post_attention_layernorm.bias",
"self_attention.dense.bias",
"mlp.dense_4h_to_h.bias",
"ln_f.weight",
"ln_f.bias",
]
_lowercase : str = [
"mlp.dense_4h_to_h.weight",
"self_attention.dense.weight",
]
def lowerCamelCase ( UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : Optional[int] ) -> str:
lowercase_ : List[str] = {
"""word_embeddings.weight""": """word_embeddings.weight""",
"""word_embeddings.norm.weight""": """word_embeddings_layernorm.weight""",
"""word_embeddings.norm.bias""": """word_embeddings_layernorm.bias""",
"""weight""": """ln_f.weight""",
"""bias""": """ln_f.bias""",
}
if key in layer_rename_map:
return layer_rename_map[key]
# Handle transformer blocks
lowercase_ : Dict = int(re.match(R""".*layer_(\d*).*""" , UpperCAmelCase__ )[1] )
layer_number -= 3
return F'''h.{layer_number}.''' + key
def lowerCamelCase ( UpperCAmelCase__ : Any ) -> Union[str, Any]:
if dtype == torch.bool:
return 1 / 8
lowercase_ : Tuple = re.search(R"""[^\d](\d+)$""" , str(UpperCAmelCase__ ) )
if bit_search is None:
raise ValueError(F'''`dtype` is not a valid dtype: {dtype}.''' )
lowercase_ : Optional[int] = int(bit_search.groups()[0] )
return bit_size // 8
def lowerCamelCase ( UpperCAmelCase__ : Dict , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : Optional[int] ) -> Optional[Any]:
# Construct model
if bloom_config_file == "":
lowercase_ : Union[str, Any] = BloomConfig()
else:
lowercase_ : int = BloomConfig.from_json_file(UpperCAmelCase__ )
if shard_model:
lowercase_ : Optional[Any] = os.listdir(UpperCAmelCase__ )
lowercase_ : Union[str, Any] = sorted(filter(lambda UpperCAmelCase__ : s.startswith("""layer""" ) and "model_00" in s , UpperCAmelCase__ ) )
lowercase_ : Union[str, Any] = {"""weight_map""": {}, """metadata""": {}}
lowercase_ : Optional[int] = 0
lowercase_ : int = None
lowercase_ : str = BloomConfig()
for j, file in enumerate(UpperCAmelCase__ ):
print("""Processing file: {}""".format(UpperCAmelCase__ ) )
lowercase_ : Any = None
for i in range(UpperCAmelCase__ ):
# load all TP files
lowercase_ : Any = file.replace("""model_00""" , F'''model_0{i}''' )
lowercase_ : Any = torch.load(os.path.join(UpperCAmelCase__ , UpperCAmelCase__ ) , map_location="""cpu""" )
# Rename keys in the transformers names
lowercase_ : int = list(temp.keys() )
for key in keys:
lowercase_ : Any = temp.pop(UpperCAmelCase__ )
if tensors is None:
lowercase_ : str = temp
else:
for key in tensors.keys():
if any(key.endswith(UpperCAmelCase__ ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ):
# We average (sum and then divide) some weights accross TP ranks (see https://github.com/bigscience-workshop/Megatron-DeepSpeed/blob/olruwase/sync_layer_norms/megatron/training.py#L425)
tensors[key] += temp[key]
else:
# Some weights are RowParallelLinear in Megatron-Deepspeed, others are ColumnParallel
lowercase_ : Union[str, Any] = 1 if any(text in key for text in WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN ) else 0
# We concatenate these weights accross TP ranks
lowercase_ : Any = torch.cat([tensors[key], temp[key]] , dim=UpperCAmelCase__ )
# Divide by the number of TP the weights we want to average
for key in tensors.keys():
if any(key.endswith(UpperCAmelCase__ ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ):
lowercase_ : Dict = tensors[key] / pretraining_tp
torch.save(
UpperCAmelCase__ , os.path.join(
UpperCAmelCase__ , """pytorch_model_{}-of-{}.bin""".format(str(j + 1 ).zfill(5 ) , str(len(UpperCAmelCase__ ) ).zfill(5 ) ) , ) , )
for key in tensors.keys():
lowercase_ : Optional[int] = tensors[key]
total_size += value.numel() * get_dtype_size(value.dtype )
if key not in index_dict["weight_map"]:
lowercase_ : Optional[int] = """pytorch_model_{}-of-{}.bin""".format(
str(j + 1 ).zfill(5 ) , str(len(UpperCAmelCase__ ) ).zfill(5 ) )
lowercase_ : Dict = BloomConfig()
lowercase_ : int = pytorch_dump_folder_path + """/""" + CONFIG_NAME
lowercase_ : List[Any] = total_size
with open(UpperCAmelCase__ , """w""" , encoding="""utf-8""" ) as f:
f.write(config.to_json_string() )
with open(os.path.join(UpperCAmelCase__ , WEIGHTS_NAME + """.index.json""" ) , """w""" , encoding="""utf-8""" ) as f:
lowercase_ : List[Any] = json.dumps(UpperCAmelCase__ , indent=2 , sort_keys=UpperCAmelCase__ ) + """\n"""
f.write(UpperCAmelCase__ )
else:
lowercase_ : Optional[int] = BloomModel(UpperCAmelCase__ )
lowercase_ : int = os.listdir(UpperCAmelCase__ )
lowercase_ : Optional[int] = sorted(filter(lambda UpperCAmelCase__ : s.startswith("""layer""" ) and "model_00" in s , UpperCAmelCase__ ) )
lowercase_ : Union[str, Any] = None
for i, file in enumerate(UpperCAmelCase__ ):
lowercase_ : Tuple = None
for i in range(UpperCAmelCase__ ):
# load all TP files
lowercase_ : List[str] = file.replace("""model_00""" , F'''model_0{i}''' )
lowercase_ : List[Any] = torch.load(os.path.join(UpperCAmelCase__ , UpperCAmelCase__ ) , map_location="""cpu""" )
# Rename keys in the transformers names
lowercase_ : int = list(temp.keys() )
for key in keys:
lowercase_ : int = temp.pop(UpperCAmelCase__ )
if tensors is None:
lowercase_ : Dict = temp
else:
for key in tensors.keys():
# We average (sum and then divide) some weights accross TP ranks (see https://github.com/bigscience-workshop/Megatron-DeepSpeed/blob/olruwase/sync_layer_norms/megatron/training.py#L425)
if any(key.endswith(UpperCAmelCase__ ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ):
tensors[key] += temp[key]
else:
# Some weights are RowParallelLinear in Megatron-Deepspeed, others are ColumnParallel
lowercase_ : Dict = 1 if any(text in key for text in WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN ) else 0
# We concatenate these weights accross TP ranks
lowercase_ : Optional[Any] = torch.cat([tensors[key], temp[key]] , dim=UpperCAmelCase__ )
# Divide by the number of TP the weights we want to average
for key in tensors.keys():
if any(key.endswith(UpperCAmelCase__ ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ):
lowercase_ : int = tensors[key] / pretraining_tp
lowercase_ : Optional[int] = model.load_state_dict(UpperCAmelCase__ , strict=UpperCAmelCase__ )
assert not other_keys.unexpected_keys, F'''The keys {other_keys.unexpected_keys} are unexpected'''
if missing_keys is None:
lowercase_ : Union[str, Any] = set(other_keys.missing_keys )
else:
lowercase_ : str = missing_keys.intersection(set(other_keys.missing_keys ) )
assert not missing_keys, F'''The keys {missing_keys} are missing'''
# Save pytorch-model
os.makedirs(UpperCAmelCase__ , exist_ok=UpperCAmelCase__ )
lowercase_ : Union[str, Any] = pytorch_dump_folder_path + """/""" + WEIGHTS_NAME
lowercase_ : Any = pytorch_dump_folder_path + """/""" + CONFIG_NAME
print(F'''Save PyTorch model to {pytorch_weights_dump_path} with dtype {config.torch_dtype}''' )
if config.torch_dtype is not None:
lowercase_ : Union[str, Any] = model.to(config.torch_dtype )
torch.save(model.state_dict() , UpperCAmelCase__ )
print(F'''Save configuration file to {pytorch_config_dump_path}''' )
with open(UpperCAmelCase__ , """w""" , encoding="""utf-8""" ) as f:
f.write(config.to_json_string() )
if __name__ == "__main__":
_lowercase : Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--bloom_checkpoint_path",
default=None,
type=str,
required=True,
help="Path to the Megatron-LM checkpoint path.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
parser.add_argument(
"--bloom_config_file",
default="",
type=str,
help=(
"An optional config json file corresponding to the pre-trained model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--shard_model",
action="store_true",
help="An optional setting to shard the output model \nThis enables sharding the converted checkpoint",
)
parser.add_argument(
"--pretraining_tp",
default=4,
type=int,
help="Pretraining TP rank that has been used when training the model in Megatron-LM \n",
)
_lowercase : List[str] = parser.parse_args()
convert_bloom_checkpoint_to_pytorch(
args.bloom_checkpoint_path,
args.bloom_config_file,
args.pytorch_dump_folder_path,
args.shard_model,
args.pretraining_tp,
)
| 30
|
'''simple docstring'''
from collections import defaultdict
from pathlib import Path
import pandas as pd
from rouge_cli import calculate_rouge_path
from utils import calculate_rouge
_lowercase : Optional[Any] = [
"Prosecutor: \"No videos were used in the crash investigation\" German papers say they saw a cell phone video of the"
" final seconds on board Flight 9525. The Germanwings co-pilot says he had a \"previous episode of severe"
" depression\" German airline confirms it knew of Andreas Lubitz's depression years before he took control.",
"The Palestinian Authority officially becomes the 123rd member of the International Criminal Court. The formal"
" accession was marked with a ceremony at The Hague, in the Netherlands. The Palestinians signed the ICC's"
" founding Rome Statute in January. Israel and the United States opposed the Palestinians' efforts to join the"
" body.",
"Amnesty International releases its annual report on the death penalty. The report catalogs the use of"
" state-sanctioned killing as a punitive measure across the globe. At least 607 people were executed around the"
" world in 2014, compared to 778 in 2013. The U.S. remains one of the worst offenders for imposing capital"
" punishment.",
]
_lowercase : List[Any] = [
"Marseille prosecutor says \"so far no videos were used in the crash investigation\" despite media reports ."
" Journalists at Bild and Paris Match are \"very confident\" the video clip is real, an editor says . Andreas Lubitz"
" had informed his Lufthansa training school of an episode of severe depression, airline says .",
"Membership gives the ICC jurisdiction over alleged crimes committed in Palestinian territories since last June ."
" Israel and the United States opposed the move, which could open the door to war crimes investigations against"
" Israelis .",
"Amnesty's annual death penalty report catalogs encouraging signs, but setbacks in numbers of those sentenced to"
" death . Organization claims that governments around the world are using the threat of terrorism to advance"
" executions . The number of executions worldwide has gone down by almost 22% compared with 2013, but death"
" sentences up by 28% .",
]
def lowerCamelCase ( ) -> List[str]:
lowercase_ : str = calculate_rouge(UpperCAmelCase__ , UpperCAmelCase__ , bootstrap_aggregation=UpperCAmelCase__ , rouge_keys=["""rouge2""", """rougeL"""] )
assert isinstance(UpperCAmelCase__ , UpperCAmelCase__ )
lowercase_ : int = calculate_rouge(UpperCAmelCase__ , UpperCAmelCase__ , bootstrap_aggregation=UpperCAmelCase__ , rouge_keys=["""rouge2"""] )
assert (
pd.DataFrame(no_aggregation["""rouge2"""] ).fmeasure.mean()
== pd.DataFrame(no_aggregation_just_ra["""rouge2"""] ).fmeasure.mean()
)
def lowerCamelCase ( ) -> Optional[Any]:
lowercase_ : Tuple = """rougeLsum"""
lowercase_ : Optional[Any] = calculate_rouge(UpperCAmelCase__ , UpperCAmelCase__ , newline_sep=UpperCAmelCase__ , rouge_keys=[k] )[k]
lowercase_ : Optional[Any] = calculate_rouge(UpperCAmelCase__ , UpperCAmelCase__ , newline_sep=UpperCAmelCase__ , rouge_keys=[k] )[k]
assert score > score_no_sep
def lowerCamelCase ( ) -> List[Any]:
lowercase_ : Optional[int] = ["""rouge1""", """rouge2""", """rougeL"""]
lowercase_ : Tuple = calculate_rouge(UpperCAmelCase__ , UpperCAmelCase__ , newline_sep=UpperCAmelCase__ , rouge_keys=UpperCAmelCase__ )
lowercase_ : Tuple = calculate_rouge(UpperCAmelCase__ , UpperCAmelCase__ , newline_sep=UpperCAmelCase__ , rouge_keys=UpperCAmelCase__ )
assert score_sep == score_no_sep
def lowerCamelCase ( ) -> Optional[Any]:
lowercase_ : Union[str, Any] = [
"""Her older sister, Margot Frank, died in 1945, a month earlier than previously thought.""",
"""Marseille prosecutor says \"so far no videos were used in the crash investigation\" despite media reports .""",
]
lowercase_ : List[str] = [
"""Margot Frank, died in 1945, a month earlier than previously thought.""",
"""Prosecutor: \"No videos were used in the crash investigation\" German papers say they saw a cell phone video of"""
""" the final seconds on board Flight 9525.""",
]
assert calculate_rouge(UpperCAmelCase__ , UpperCAmelCase__ , newline_sep=UpperCAmelCase__ ) == calculate_rouge(UpperCAmelCase__ , UpperCAmelCase__ , newline_sep=UpperCAmelCase__ )
def lowerCamelCase ( ) -> Union[str, Any]:
lowercase_ : Optional[Any] = [
"""\" \"a person who has such a video needs to immediately give it to the investigators,\" prosecutor says .<n> \"it is a very disturbing scene,\" editor-in-chief of bild online tells \"erin burnett: outfront\" """
]
lowercase_ : List[Any] = [
""" Marseille prosecutor says \"so far no videos were used in the crash investigation\" despite media reports . Journalists at Bild and Paris Match are \"very confident\" the video clip is real, an editor says . Andreas Lubitz had informed his Lufthansa training school of an episode of severe depression, airline says ."""
]
lowercase_ : Optional[int] = calculate_rouge(UpperCAmelCase__ , UpperCAmelCase__ , rouge_keys=["""rougeLsum"""] , newline_sep=UpperCAmelCase__ )["""rougeLsum"""]
lowercase_ : List[str] = calculate_rouge(UpperCAmelCase__ , UpperCAmelCase__ , rouge_keys=["""rougeLsum"""] )["""rougeLsum"""]
assert new_score > prev_score
def lowerCamelCase ( ) -> Tuple:
lowercase_ : Optional[int] = Path("""examples/seq2seq/test_data/wmt_en_ro""" )
lowercase_ : List[Any] = calculate_rouge_path(data_dir.joinpath("""test.source""" ) , data_dir.joinpath("""test.target""" ) )
assert isinstance(UpperCAmelCase__ , UpperCAmelCase__ )
lowercase_ : Union[str, Any] = calculate_rouge_path(
data_dir.joinpath("""test.source""" ) , data_dir.joinpath("""test.target""" ) , bootstrap_aggregation=UpperCAmelCase__ )
assert isinstance(UpperCAmelCase__ , UpperCAmelCase__ )
| 30
| 1
|
"""simple docstring"""
import argparse
import json
from tqdm import tqdm
def a_ ( ):
'''simple docstring'''
lowercase__ : str = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--src_path' , type=_lowerCAmelCase , default='biencoder-nq-dev.json' , help='Path to raw DPR training data' , )
parser.add_argument(
'--evaluation_set' , type=_lowerCAmelCase , help='where to store parsed evaluation_set file' , )
parser.add_argument(
'--gold_data_path' , type=_lowerCAmelCase , help='where to store parsed gold_data_path file' , )
lowercase__ : Union[str, Any] = parser.parse_args()
with open(args.src_path , 'r' ) as src_file, open(args.evaluation_set , 'w' ) as eval_file, open(
args.gold_data_path , 'w' ) as gold_file:
lowercase__ : List[str] = json.load(_lowerCAmelCase )
for dpr_record in tqdm(_lowerCAmelCase ):
lowercase__ : Any = dpr_record['question']
lowercase__ : Optional[Any] = [context['title'] for context in dpr_record['positive_ctxs']]
eval_file.write(question + '\n' )
gold_file.write('\t'.join(_lowerCAmelCase ) + '\n' )
if __name__ == "__main__":
main()
| 599
|
"""simple docstring"""
class UpperCAmelCase_ :
def __init__( self , a , a , a ) -> List[Any]:
lowercase__ : List[str] = name
lowercase__ : List[str] = value
lowercase__ : Tuple = weight
def __repr__( self ) -> Any:
return f"""{self.__class__.__name__}({self.name}, {self.value}, {self.weight})"""
def _UpperCAmelCase ( self ) -> Any:
return self.value
def _UpperCAmelCase ( self ) -> int:
return self.name
def _UpperCAmelCase ( self ) -> str:
return self.weight
def _UpperCAmelCase ( self ) -> int:
return self.value / self.weight
def a_ ( _lowerCAmelCase : List[str] , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : List[str] ):
'''simple docstring'''
lowercase__ : Union[str, Any] = []
for i in range(len(_lowerCAmelCase ) ):
menu.append(Things(name[i] , value[i] , weight[i] ) )
return menu
def a_ ( _lowerCAmelCase : Any , _lowerCAmelCase : Dict , _lowerCAmelCase : Dict ):
'''simple docstring'''
lowercase__ : Optional[Any] = sorted(_lowerCAmelCase , key=_lowerCAmelCase , reverse=_lowerCAmelCase )
lowercase__ : int = []
lowercase__ , lowercase__ : Dict = 0.0, 0.0
for i in range(len(_lowerCAmelCase ) ):
if (total_cost + items_copy[i].get_weight()) <= max_cost:
result.append(items_copy[i] )
total_cost += items_copy[i].get_weight()
total_value += items_copy[i].get_value()
return (result, total_value)
def a_ ( ):
'''simple docstring'''
if __name__ == "__main__":
import doctest
doctest.testmod()
| 599
| 1
|
'''simple docstring'''
def UpperCamelCase_ ( A__ : str ):
'''simple docstring'''
lowerCAmelCase_ : List[Any] = hex_num.strip()
if not hex_num:
raise ValueError("""No value was passed to the function""" )
lowerCAmelCase_ : Optional[Any] = hex_num[0] == """-"""
if is_negative:
lowerCAmelCase_ : Optional[int] = hex_num[1:]
try:
lowerCAmelCase_ : Tuple = int(A__ , 16 )
except ValueError:
raise ValueError("""Invalid value was passed to the function""" )
lowerCAmelCase_ : Optional[Any] = """"""
while int_num > 0:
lowerCAmelCase_ : str = str(int_num % 2 ) + bin_str
int_num >>= 1
return int(("""-""" + bin_str) if is_negative else bin_str )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 702
|
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
__A : Tuple = logging.get_logger(__name__)
__A : Optional[Any] = "▁"
__A : Tuple = {"vocab_file": "sentencepiece.bpe.model"}
__A : Tuple = {
"vocab_file": {
"facebook/xglm-564M": "https://huggingface.co/facebook/xglm-564M/resolve/main/sentencepiece.bpe.model",
}
}
__A : int = {
"facebook/xglm-564M": 2048,
}
class __snake_case ( _SCREAMING_SNAKE_CASE):
"""simple docstring"""
lowercase = VOCAB_FILES_NAMES
lowercase = PRETRAINED_VOCAB_FILES_MAP
lowercase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase = ['input_ids', 'attention_mask']
def __init__( self : Any , lowerCamelCase : Any , lowerCamelCase : str="<s>" , lowerCamelCase : Optional[int]="</s>" , lowerCamelCase : Optional[Any]="</s>" , lowerCamelCase : List[Any]="<s>" , lowerCamelCase : Optional[Any]="<unk>" , lowerCamelCase : int="<pad>" , lowerCamelCase : Optional[Dict[str, Any]] = None , **lowerCamelCase : Optional[Any] , ) -> None:
lowerCAmelCase_ : List[str] = {} if sp_model_kwargs is None else sp_model_kwargs
# Compatibility with the original tokenizer
lowerCAmelCase_ : str = 7
lowerCAmelCase_ : Any = [F'<madeupword{i}>' for i in range(self.num_madeup_words )]
lowerCAmelCase_ : Optional[Any] = kwargs.get("""additional_special_tokens""" , [] )
kwargs["additional_special_tokens"] += [
word for word in madeup_words if word not in kwargs["additional_special_tokens"]
]
super().__init__(
bos_token=lowerCamelCase , eos_token=lowerCamelCase , unk_token=lowerCamelCase , sep_token=lowerCamelCase , cls_token=lowerCamelCase , pad_token=lowerCamelCase , sp_model_kwargs=self.sp_model_kwargs , **lowerCamelCase , )
lowerCAmelCase_ : Any = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(lowerCamelCase ) )
lowerCAmelCase_ : int = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
lowerCAmelCase_ : List[str] = 1
# Mimic fairseq token-to-id alignment for the first 4 token
lowerCAmelCase_ : Any = {"""<s>""": 0, """<pad>""": 1, """</s>""": 2, """<unk>""": 3}
lowerCAmelCase_ : Union[str, Any] = len(self.sp_model )
lowerCAmelCase_ : Any = {F'<madeupword{i}>': sp_size + i + self.fairseq_offset for i in range(self.num_madeup_words )}
self.fairseq_tokens_to_ids.update(lowerCamelCase )
lowerCAmelCase_ : int = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
def __getstate__( self : int ) -> Union[str, Any]:
lowerCAmelCase_ : Union[str, Any] = self.__dict__.copy()
lowerCAmelCase_ : str = None
lowerCAmelCase_ : List[str] = self.sp_model.serialized_model_proto()
return state
def __setstate__( self : Dict , lowerCamelCase : List[Any] ) -> List[Any]:
lowerCAmelCase_ : Union[str, Any] = d
# for backward compatibility
if not hasattr(self , """sp_model_kwargs""" ):
lowerCAmelCase_ : int = {}
lowerCAmelCase_ : str = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
def __lowercase ( self : List[Any] , lowerCamelCase : List[int] , lowerCamelCase : Optional[List[int]] = None ) -> List[int]:
if token_ids_a is None:
return [self.sep_token_id] + token_ids_a
lowerCAmelCase_ : List[Any] = [self.sep_token_id]
return sep + token_ids_a + sep + sep + token_ids_a
def __lowercase ( self : Tuple , lowerCamelCase : List[int] , lowerCamelCase : Optional[List[int]] = None , lowerCamelCase : bool = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowerCamelCase , token_ids_a=lowerCamelCase , already_has_special_tokens=lowerCamelCase )
if token_ids_a is None:
return [1] + ([0] * len(lowerCamelCase ))
return [1] + ([0] * len(lowerCamelCase )) + [1, 1] + ([0] * len(lowerCamelCase ))
def __lowercase ( self : str , lowerCamelCase : List[int] , lowerCamelCase : Optional[List[int]] = None ) -> List[int]:
lowerCAmelCase_ : Dict = [self.sep_token_id]
if token_ids_a is None:
return len(sep + token_ids_a ) * [0]
return len(sep + token_ids_a + sep + sep + token_ids_a ) * [0]
@property
def __lowercase ( self : str ) -> Union[str, Any]:
return len(self.sp_model ) + self.fairseq_offset + self.num_madeup_words
def __lowercase ( self : Optional[Any] ) -> Dict:
lowerCAmelCase_ : Optional[Any] = {self.convert_ids_to_tokens(lowerCamelCase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __lowercase ( self : int , lowerCamelCase : str ) -> List[str]:
return self.sp_model.encode(lowerCamelCase , out_type=lowerCamelCase )
def __lowercase ( self : int , lowerCamelCase : Dict ) -> Any:
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
lowerCAmelCase_ : int = self.sp_model.PieceToId(lowerCamelCase )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def __lowercase ( self : Dict , lowerCamelCase : Optional[int] ) -> Any:
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def __lowercase ( self : List[str] , lowerCamelCase : Optional[Any] ) -> Optional[Any]:
lowerCAmelCase_ : str = """""".join(lowerCamelCase ).replace(lowerCamelCase , """ """ ).strip()
return out_string
def __lowercase ( self : Optional[Any] , lowerCamelCase : str , lowerCamelCase : Optional[str] = None ) -> Tuple[str]:
if not os.path.isdir(lowerCamelCase ):
logger.error(F'Vocabulary path ({save_directory}) should be a directory' )
return
lowerCAmelCase_ : List[str] = os.path.join(
lowerCamelCase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(lowerCamelCase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , lowerCamelCase )
elif not os.path.isfile(self.vocab_file ):
with open(lowerCamelCase , """wb""" ) as fi:
lowerCAmelCase_ : Union[str, Any] = self.sp_model.serialized_model_proto()
fi.write(lowerCamelCase )
return (out_vocab_file,)
| 398
| 0
|
"""simple docstring"""
import json
import re
from typing import TYPE_CHECKING, List, Optional, Tuple, Union
import numpy as np
from ...utils import is_tf_available, is_torch_available, logging
if TYPE_CHECKING:
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_codegen import CodeGenTokenizer
A_ = logging.get_logger(__name__)
A_ = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt""", """tokenizer_file""": """tokenizer.json"""}
A_ = {
"""vocab_file""": {
"""Salesforce/codegen-350M-mono""": """https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/vocab.json""",
},
"""merges_file""": {
"""Salesforce/codegen-350M-mono""": """https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/merges.txt""",
},
"""tokenizer_file""": {
"""Salesforce/codegen-350M-mono""": (
"""https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/tokenizer.json"""
),
},
}
A_ = {
"""Salesforce/codegen-350M-mono""": 2048,
}
class __lowerCamelCase ( lowerCAmelCase ):
a__: List[str] = VOCAB_FILES_NAMES
a__: Any = PRETRAINED_VOCAB_FILES_MAP
a__: int = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a__: Union[str, Any] = ['input_ids', 'attention_mask']
a__: List[str] = CodeGenTokenizer
def __init__( self , UpperCAmelCase=None , UpperCAmelCase=None , UpperCAmelCase=None , UpperCAmelCase="<|endoftext|>" , UpperCAmelCase="<|endoftext|>" , UpperCAmelCase="<|endoftext|>" , UpperCAmelCase=False , **UpperCAmelCase , ):
super().__init__(
UpperCAmelCase , UpperCAmelCase , tokenizer_file=UpperCAmelCase , unk_token=UpperCAmelCase , bos_token=UpperCAmelCase , eos_token=UpperCAmelCase , add_prefix_space=UpperCAmelCase , **UpperCAmelCase , )
if kwargs.pop('''add_bos_token''' , UpperCAmelCase ):
lowerCamelCase_ = kwargs.pop('''name_or_path''' , '''''' )
raise ValueError(
'''Currenty GPT2\'s fast tokenizer does NOT support adding a BOS token.'''
'''Instead you should use GPT2\'s slow tokenizer class `CodeGenTokenizer` as follows: \n'''
f"`CodeGenTokenizer.from_pretrained('{model_id}')`\nor\n"
f"`AutoTokenizer.from_pretrained('{model_id}', use_fast=False)`\n"
'''This issue will be fixed soon, see: https://github.com/huggingface/tokenizers/pull/1005.'''
''' so that the fast tokenizer works correctly.''' )
lowerCamelCase_ = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get('''add_prefix_space''' , UpperCAmelCase ) != add_prefix_space:
lowerCamelCase_ = getattr(UpperCAmelCase , pre_tok_state.pop('''type''' ) )
lowerCamelCase_ = add_prefix_space
lowerCamelCase_ = pre_tok_class(**UpperCAmelCase )
lowerCamelCase_ = add_prefix_space
def UpperCAmelCase__ ( self , *UpperCAmelCase , **UpperCAmelCase ):
lowerCamelCase_ = kwargs.get('''is_split_into_words''' , UpperCAmelCase )
assert self.add_prefix_space or not is_split_into_words, (
f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
"to use it with pretokenized inputs."
)
return super()._batch_encode_plus(*UpperCAmelCase , **UpperCAmelCase )
def UpperCAmelCase__ ( self , *UpperCAmelCase , **UpperCAmelCase ):
lowerCamelCase_ = kwargs.get('''is_split_into_words''' , UpperCAmelCase )
assert self.add_prefix_space or not is_split_into_words, (
f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
"to use it with pretokenized inputs."
)
return super()._encode_plus(*UpperCAmelCase , **UpperCAmelCase )
def UpperCAmelCase__ ( self , UpperCAmelCase , UpperCAmelCase = None ):
lowerCamelCase_ = self._tokenizer.model.save(UpperCAmelCase , name=UpperCAmelCase )
return tuple(UpperCAmelCase )
def UpperCAmelCase__ ( self , UpperCAmelCase , UpperCAmelCase = False , UpperCAmelCase = None , UpperCAmelCase = None , **UpperCAmelCase , ):
lowerCamelCase_ = super().decode(
token_ids=UpperCAmelCase , skip_special_tokens=UpperCAmelCase , clean_up_tokenization_spaces=UpperCAmelCase , **UpperCAmelCase , )
if truncate_before_pattern is not None and len(UpperCAmelCase ) > 0:
lowerCamelCase_ = self.truncate(UpperCAmelCase , UpperCAmelCase )
return decoded_text
def UpperCAmelCase__ ( self , UpperCAmelCase , UpperCAmelCase ):
def find_re(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ):
lowerCamelCase_ = pattern.search(UpperCAmelCase , UpperCAmelCase )
return m.start() if m else -1
lowerCamelCase_ = [re.compile(UpperCAmelCase , re.MULTILINE ) for pattern in truncate_before_pattern]
lowerCamelCase_ = list(re.finditer('''^print''' , UpperCAmelCase , re.MULTILINE ) )
if len(UpperCAmelCase ) > 1:
lowerCamelCase_ = completion[: prints[1].start()]
lowerCamelCase_ = list(re.finditer('''^def''' , UpperCAmelCase , re.MULTILINE ) )
if len(UpperCAmelCase ) > 1:
lowerCamelCase_ = completion[: defs[1].start()]
lowerCamelCase_ = 0
lowerCamelCase_ = [
pos for pos in [find_re(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) for terminal in terminals] if pos != -1
]
if len(UpperCAmelCase ) > 0:
return completion[: min(UpperCAmelCase )]
else:
return completion
| 29
|
'''simple docstring'''
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
from seqaseq_trainer import SeqaSeqTrainer
from seqaseq_training_args import SeqaSeqTrainingArguments
import transformers
from transformers import (
AutoConfig,
AutoModelForSeqaSeqLM,
AutoTokenizer,
HfArgumentParser,
MBartTokenizer,
MBartTokenizerFast,
set_seed,
)
from transformers.trainer_utils import EvaluationStrategy, is_main_process
from transformers.training_args import ParallelMode
from utils import (
SeqaSeqDataCollator,
SeqaSeqDataset,
assert_all_frozen,
build_compute_metrics_fn,
check_output_dir,
freeze_embeds,
freeze_params,
lmap,
save_json,
use_task_specific_params,
write_txt_file,
)
lowerCAmelCase : Optional[Any] = logging.getLogger(__name__)
@dataclass
class UpperCamelCase__ :
"""simple docstring"""
__magic_name__ = field(
metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} )
__magic_name__ = field(
default=SCREAMING_SNAKE_CASE_ , metadata={"help": "Pretrained config name or path if not the same as model_name"} )
__magic_name__ = field(
default=SCREAMING_SNAKE_CASE_ , metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} )
__magic_name__ = field(
default=SCREAMING_SNAKE_CASE_ , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , )
__magic_name__ = field(default=SCREAMING_SNAKE_CASE_ , metadata={"help": "Whether tp freeze the encoder."} )
__magic_name__ = field(default=SCREAMING_SNAKE_CASE_ , metadata={"help": "Whether to freeze the embeddings."} )
@dataclass
class UpperCamelCase__ :
"""simple docstring"""
__magic_name__ = field(
metadata={"help": "The input data dir. Should contain the .tsv files (or other data files) for the task."} )
__magic_name__ = field(
default="summarization" , metadata={"help": "Task name, summarization (or summarization_{dataset} for pegasus) or translation"} , )
__magic_name__ = field(
default=1_0_2_4 , metadata={
"help": (
"The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
)
} , )
__magic_name__ = field(
default=1_2_8 , metadata={
"help": (
"The maximum total sequence length for target text after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
)
} , )
__magic_name__ = field(
default=1_4_2 , metadata={
"help": (
"The maximum total sequence length for validation target text after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded. "
"This argument is also used to override the ``max_length`` param of ``model.generate``, which is used "
"during ``evaluate`` and ``predict``."
)
} , )
__magic_name__ = field(
default=1_4_2 , metadata={
"help": (
"The maximum total sequence length for test target text after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
)
} , )
__magic_name__ = field(default=-1 , metadata={"help": "# training examples. -1 means use all."} )
__magic_name__ = field(default=-1 , metadata={"help": "# validation examples. -1 means use all."} )
__magic_name__ = field(default=-1 , metadata={"help": "# test examples. -1 means use all."} )
__magic_name__ = field(default=SCREAMING_SNAKE_CASE_ , metadata={"help": "Source language id for translation."} )
__magic_name__ = field(default=SCREAMING_SNAKE_CASE_ , metadata={"help": "Target language id for translation."} )
__magic_name__ = field(default=SCREAMING_SNAKE_CASE_ , metadata={"help": "# num_beams to use for evaluation."} )
__magic_name__ = field(
default=SCREAMING_SNAKE_CASE_ , metadata={"help": "If only pad tokens should be ignored. This assumes that `config.pad_token_id` is defined."} , )
def lowercase (_A , _A , _A ):
"""simple docstring"""
logger.info(f'***** {split} metrics *****' )
for key in sorted(metrics.keys() ):
logger.info(f' {key} = {metrics[key]}' )
save_json(_A , os.path.join(_A , f'{split}_results.json' ) )
def lowercase ():
"""simple docstring"""
_lowerCAmelCase : int = HfArgumentParser((ModelArguments, DataTrainingArguments, SeqaSeqTrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('.json' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : Tuple = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : Optional[int] = parser.parse_args_into_dataclasses()
check_output_dir(_A )
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
'Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s' , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.parallel_mode == ParallelMode.DISTRIBUTED ) , training_args.fpaa , )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
logger.info('Training/evaluation parameters %s' , _A )
# Set seed
set_seed(training_args.seed )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
_lowerCAmelCase : Optional[int] = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
_lowerCAmelCase : Tuple = ('encoder_layerdrop', 'decoder_layerdrop', 'dropout', 'attention_dropout')
for p in extra_model_params:
if getattr(_A , _A , _A ):
assert hasattr(_A , _A ), f'({config.__class__.__name__}) doesn\'t have a `{p}` attribute'
setattr(_A , _A , getattr(_A , _A ) )
_lowerCAmelCase : Any = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
_lowerCAmelCase : str = AutoModelForSeqaSeqLM.from_pretrained(
model_args.model_name_or_path , from_tf='.ckpt' in model_args.model_name_or_path , config=_A , cache_dir=model_args.cache_dir , )
# use task specific params
use_task_specific_params(_A , data_args.task )
# set num_beams for evaluation
if data_args.eval_beams is None:
_lowerCAmelCase : Union[str, Any] = model.config.num_beams
# set decoder_start_token_id for MBart
if model.config.decoder_start_token_id is None and isinstance(_A , (MBartTokenizer, MBartTokenizerFast) ):
assert (
data_args.tgt_lang is not None and data_args.src_lang is not None
), "mBart requires --tgt_lang and --src_lang"
if isinstance(_A , _A ):
_lowerCAmelCase : Union[str, Any] = tokenizer.lang_code_to_id[data_args.tgt_lang]
else:
_lowerCAmelCase : Union[str, Any] = tokenizer.convert_tokens_to_ids(data_args.tgt_lang )
if model_args.freeze_embeds:
freeze_embeds(_A )
if model_args.freeze_encoder:
freeze_params(model.get_encoder() )
assert_all_frozen(model.get_encoder() )
_lowerCAmelCase : Any = SeqaSeqDataset
# Get datasets
_lowerCAmelCase : List[Any] = (
dataset_class(
_A , type_path='train' , data_dir=data_args.data_dir , n_obs=data_args.n_train , max_target_length=data_args.max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or '' , )
if training_args.do_train
else None
)
_lowerCAmelCase : Optional[Any] = (
dataset_class(
_A , type_path='val' , data_dir=data_args.data_dir , n_obs=data_args.n_val , max_target_length=data_args.val_max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or '' , )
if training_args.do_eval or training_args.evaluation_strategy != EvaluationStrategy.NO
else None
)
_lowerCAmelCase : Union[str, Any] = (
dataset_class(
_A , type_path='test' , data_dir=data_args.data_dir , n_obs=data_args.n_test , max_target_length=data_args.test_max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or '' , )
if training_args.do_predict
else None
)
# Initialize our Trainer
_lowerCAmelCase : Optional[Any] = (
build_compute_metrics_fn(data_args.task , _A ) if training_args.predict_with_generate else None
)
_lowerCAmelCase : int = SeqaSeqTrainer(
model=_A , args=_A , data_args=_A , train_dataset=_A , eval_dataset=_A , data_collator=SeqaSeqDataCollator(
_A , _A , model.config.decoder_start_token_id , training_args.tpu_num_cores ) , compute_metrics=_A , tokenizer=_A , )
_lowerCAmelCase : str = {}
# Training
if training_args.do_train:
logger.info('*** Train ***' )
_lowerCAmelCase : Dict = trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
_lowerCAmelCase : Any = train_result.metrics
_lowerCAmelCase : str = data_args.n_train
trainer.save_model() # this also saves the tokenizer
if trainer.is_world_process_zero():
handle_metrics('train' , _A , training_args.output_dir )
all_metrics.update(_A )
# Need to save the state, since Trainer.save_model saves only the tokenizer with the model
trainer.state.save_to_json(os.path.join(training_args.output_dir , 'trainer_state.json' ) )
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
if training_args.do_eval:
logger.info('*** Evaluate ***' )
_lowerCAmelCase : List[str] = trainer.evaluate(metric_key_prefix='val' )
_lowerCAmelCase : int = data_args.n_val
_lowerCAmelCase : List[str] = round(metrics['val_loss'] , 4 )
if trainer.is_world_process_zero():
handle_metrics('val' , _A , training_args.output_dir )
all_metrics.update(_A )
if training_args.do_predict:
logger.info('*** Predict ***' )
_lowerCAmelCase : Any = trainer.predict(test_dataset=_A , metric_key_prefix='test' )
_lowerCAmelCase : Optional[int] = test_output.metrics
_lowerCAmelCase : Optional[int] = data_args.n_test
if trainer.is_world_process_zero():
_lowerCAmelCase : Dict = round(metrics['test_loss'] , 4 )
handle_metrics('test' , _A , training_args.output_dir )
all_metrics.update(_A )
if training_args.predict_with_generate:
_lowerCAmelCase : Union[str, Any] = tokenizer.batch_decode(
test_output.predictions , skip_special_tokens=_A , clean_up_tokenization_spaces=_A )
_lowerCAmelCase : List[Any] = lmap(str.strip , _A )
write_txt_file(_A , os.path.join(training_args.output_dir , 'test_generations.txt' ) )
if trainer.is_world_process_zero():
save_json(_A , os.path.join(training_args.output_dir , 'all_results.json' ) )
return all_metrics
def lowercase (_A ):
"""simple docstring"""
main()
if __name__ == "__main__":
main()
| 444
| 0
|
"""simple docstring"""
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import torch
from datasets import load_dataset
from torchvision.transforms import Compose, Lambda, Normalize, RandomHorizontalFlip, RandomResizedCrop, ToTensor
from torchvision.transforms.functional import InterpolationMode
import transformers
from transformers import (
HfArgumentParser,
Trainer,
TrainingArguments,
ViTImageProcessor,
ViTMAEConfig,
ViTMAEForPreTraining,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
SCREAMING_SNAKE_CASE_ : List[Any] = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('''4.31.0''')
require_version('''datasets>=1.8.0''', '''To fix: pip install -r examples/pytorch/image-pretraining/requirements.txt''')
@dataclass
class _A :
__a = field(
default='cifar10' , metadata={'help': 'Name of a dataset from the datasets package'} )
__a = field(
default=_lowercase , metadata={'help': 'The configuration name of the dataset to use (via the datasets library).'} )
__a = field(
default=_lowercase , metadata={'help': 'The column name of the images in the files.'} )
__a = field(default=_lowercase , metadata={'help': 'A folder containing the training data.'} )
__a = field(default=_lowercase , metadata={'help': 'A folder containing the validation data.'} )
__a = field(
default=0.15 , metadata={'help': 'Percent to split off of train for validation.'} )
__a = field(
default=_lowercase , metadata={
'help': (
'For debugging purposes or quicker training, truncate the number of training examples to this '
'value if set.'
)
} , )
__a = field(
default=_lowercase , metadata={
'help': (
'For debugging purposes or quicker training, truncate the number of evaluation examples to this '
'value if set.'
)
} , )
def _lowerCamelCase ( self ) -> Dict:
lowerCamelCase__ = {}
if self.train_dir is not None:
lowerCamelCase__ = self.train_dir
if self.validation_dir is not None:
lowerCamelCase__ = self.validation_dir
lowerCamelCase__ = data_files if data_files else None
@dataclass
class _A :
__a = field(
default=_lowercase , metadata={
'help': (
'The model checkpoint for weights initialization.Don\'t set if you want to train a model from scratch.'
)
} , )
__a = field(
default=_lowercase , metadata={'help': 'Pretrained config name or path if not the same as model_name_or_path'} )
__a = field(
default=_lowercase , metadata={
'help': (
'Override some existing default config settings when a model is trained from scratch. Example: '
'n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index'
)
} , )
__a = field(
default=_lowercase , metadata={'help': 'Where do you want to store the pretrained models downloaded from s3'} )
__a = field(
default='main' , metadata={'help': 'The specific model version to use (can be a branch name, tag name or commit id).'} , )
__a = field(default=_lowercase , metadata={'help': 'Name or path of preprocessor config.'} )
__a = field(
default=_lowercase , metadata={
'help': (
'Will use the token generated when running `huggingface-cli login` (necessary to use this script '
'with private models).'
)
} , )
__a = field(
default=0.75 , metadata={'help': 'The ratio of the number of masked tokens in the input sequence.'} )
__a = field(
default=_lowercase , metadata={'help': 'Whether or not to train with normalized pixel values as target.'} )
@dataclass
class _A ( _lowercase ):
__a = field(
default=1E-3 , metadata={'help': 'Base learning rate: absolute_lr = base_lr * total_batch_size / 256.'} )
def UpperCAmelCase__ ( A__ ) -> Dict:
"""simple docstring"""
lowerCamelCase__ = torch.stack([example["pixel_values"] for example in examples] )
return {"pixel_values": pixel_values}
def UpperCAmelCase__ ( ) -> List[Any]:
"""simple docstring"""
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
lowerCamelCase__ = HfArgumentParser((ModelArguments, DataTrainingArguments, CustomTrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith(".json" ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry("run_mae" , snake_case__ , snake_case__ )
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
lowerCamelCase__ = training_args.get_process_log_level()
logger.setLevel(snake_case__ )
transformers.utils.logging.set_verbosity(snake_case__ )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
f'Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}'
+ f'distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}' )
logger.info(f'Training/evaluation parameters {training_args}' )
# Detecting last checkpoint.
lowerCamelCase__ = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
lowerCamelCase__ = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
f'Output directory ({training_args.output_dir}) already exists and is not empty. '
"Use --overwrite_output_dir to overcome." )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
f'Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '
"the `--output_dir` or add `--overwrite_output_dir` to train from scratch." )
# Initialize our dataset.
lowerCamelCase__ = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , data_files=data_args.data_files , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
# If we don't have a validation split, split off a percentage of train as validation.
lowerCamelCase__ = None if "validation" in ds.keys() else data_args.train_val_split
if isinstance(data_args.train_val_split , snake_case__ ) and data_args.train_val_split > 0.0:
lowerCamelCase__ = ds["train"].train_test_split(data_args.train_val_split )
lowerCamelCase__ = split["train"]
lowerCamelCase__ = split["test"]
# Load pretrained model and image processor
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
lowerCamelCase__ = {
"cache_dir": model_args.cache_dir,
"revision": model_args.model_revision,
"use_auth_token": True if model_args.use_auth_token else None,
}
if model_args.config_name:
lowerCamelCase__ = ViTMAEConfig.from_pretrained(model_args.config_name , **snake_case__ )
elif model_args.model_name_or_path:
lowerCamelCase__ = ViTMAEConfig.from_pretrained(model_args.model_name_or_path , **snake_case__ )
else:
lowerCamelCase__ = ViTMAEConfig()
logger.warning("You are instantiating a new config instance from scratch." )
if model_args.config_overrides is not None:
logger.info(f'Overriding config: {model_args.config_overrides}' )
config.update_from_string(model_args.config_overrides )
logger.info(f'New config: {config}' )
# adapt config
config.update(
{
"mask_ratio": model_args.mask_ratio,
"norm_pix_loss": model_args.norm_pix_loss,
} )
# create image processor
if model_args.image_processor_name:
lowerCamelCase__ = ViTImageProcessor.from_pretrained(model_args.image_processor_name , **snake_case__ )
elif model_args.model_name_or_path:
lowerCamelCase__ = ViTImageProcessor.from_pretrained(model_args.model_name_or_path , **snake_case__ )
else:
lowerCamelCase__ = ViTImageProcessor()
# create model
if model_args.model_name_or_path:
lowerCamelCase__ = ViTMAEForPreTraining.from_pretrained(
model_args.model_name_or_path , from_tf=bool(".ckpt" in model_args.model_name_or_path ) , config=snake_case__ , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
else:
logger.info("Training new model from scratch" )
lowerCamelCase__ = ViTMAEForPreTraining(snake_case__ )
if training_args.do_train:
lowerCamelCase__ = ds["train"].column_names
else:
lowerCamelCase__ = ds["validation"].column_names
if data_args.image_column_name is not None:
lowerCamelCase__ = data_args.image_column_name
elif "image" in column_names:
lowerCamelCase__ = "image"
elif "img" in column_names:
lowerCamelCase__ = "img"
else:
lowerCamelCase__ = column_names[0]
# transformations as done in original MAE paper
# source: https://github.com/facebookresearch/mae/blob/main/main_pretrain.py
if "shortest_edge" in image_processor.size:
lowerCamelCase__ = image_processor.size["shortest_edge"]
else:
lowerCamelCase__ = (image_processor.size["height"], image_processor.size["width"])
lowerCamelCase__ = Compose(
[
Lambda(lambda A__ : img.convert("RGB" ) if img.mode != "RGB" else img ),
RandomResizedCrop(snake_case__ , scale=(0.2, 1.0) , interpolation=InterpolationMode.BICUBIC ),
RandomHorizontalFlip(),
ToTensor(),
Normalize(mean=image_processor.image_mean , std=image_processor.image_std ),
] )
def preprocess_images(A__ ):
lowerCamelCase__ = [transforms(snake_case__ ) for image in examples[image_column_name]]
return examples
if training_args.do_train:
if "train" not in ds:
raise ValueError("--do_train requires a train dataset" )
if data_args.max_train_samples is not None:
lowerCamelCase__ = ds["train"].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) )
# Set the training transforms
ds["train"].set_transform(snake_case__ )
if training_args.do_eval:
if "validation" not in ds:
raise ValueError("--do_eval requires a validation dataset" )
if data_args.max_eval_samples is not None:
lowerCamelCase__ = (
ds["validation"].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) )
)
# Set the validation transforms
ds["validation"].set_transform(snake_case__ )
# Compute absolute learning rate
lowerCamelCase__ = (
training_args.train_batch_size * training_args.gradient_accumulation_steps * training_args.world_size
)
if training_args.base_learning_rate is not None:
lowerCamelCase__ = training_args.base_learning_rate * total_train_batch_size / 256
# Initialize our trainer
lowerCamelCase__ = Trainer(
model=snake_case__ , args=snake_case__ , train_dataset=ds["train"] if training_args.do_train else None , eval_dataset=ds["validation"] if training_args.do_eval else None , tokenizer=snake_case__ , data_collator=snake_case__ , )
# Training
if training_args.do_train:
lowerCamelCase__ = None
if training_args.resume_from_checkpoint is not None:
lowerCamelCase__ = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
lowerCamelCase__ = last_checkpoint
lowerCamelCase__ = trainer.train(resume_from_checkpoint=snake_case__ )
trainer.save_model()
trainer.log_metrics("train" , train_result.metrics )
trainer.save_metrics("train" , train_result.metrics )
trainer.save_state()
# Evaluation
if training_args.do_eval:
lowerCamelCase__ = trainer.evaluate()
trainer.log_metrics("eval" , snake_case__ )
trainer.save_metrics("eval" , snake_case__ )
# Write model card and (optionally) push to hub
lowerCamelCase__ = {
"tasks": "masked-auto-encoding",
"dataset": data_args.dataset_name,
"tags": ["masked-auto-encoding"],
}
if training_args.push_to_hub:
trainer.push_to_hub(**snake_case__ )
else:
trainer.create_model_card(**snake_case__ )
def UpperCAmelCase__ ( A__ ) -> List[str]:
"""simple docstring"""
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
| 700
|
"""simple docstring"""
import json
import os
import tempfile
from unittest.mock import patch
import torch
from torch.utils.data import DataLoader, TensorDataset
from accelerate import DistributedType, infer_auto_device_map, init_empty_weights
from accelerate.accelerator import Accelerator
from accelerate.state import GradientState, PartialState
from accelerate.test_utils import require_bnb, require_multi_gpu, slow
from accelerate.test_utils.testing import AccelerateTestCase, require_cuda
from accelerate.utils import patch_environment
def UpperCAmelCase__ ( ) -> List[Any]:
"""simple docstring"""
lowerCamelCase__ = torch.nn.Linear(2 , 4 )
lowerCamelCase__ = torch.optim.AdamW(model.parameters() , lr=1.0 )
lowerCamelCase__ = torch.optim.lr_scheduler.OneCycleLR(A__ , max_lr=0.01 , steps_per_epoch=2 , epochs=1 )
lowerCamelCase__ = DataLoader(TensorDataset(torch.tensor([1, 2, 3] ) ) )
lowerCamelCase__ = DataLoader(TensorDataset(torch.tensor([4, 5, 6] ) ) )
return model, optimizer, scheduler, train_dl, valid_dl
def UpperCAmelCase__ ( A__ ) -> Any:
"""simple docstring"""
return (model.weight.abs().sum() + model.bias.abs().sum()).item()
def UpperCAmelCase__ ( A__ ) -> Any:
"""simple docstring"""
lowerCamelCase__ = torch.nn.Linear(*tuple(model.weight.T.shape ) ).state_dict()
model.load_state_dict(A__ )
class _A ( __a ):
@require_cuda
def _lowerCamelCase ( self ) -> Optional[Any]:
lowerCamelCase__ = Accelerator()
assert PartialState._shared_state["_cpu"] is False
assert PartialState._shared_state["device"].type == "cuda"
with self.assertRaises(SCREAMING_SNAKE_CASE__ ):
lowerCamelCase__ = Accelerator(cpu=SCREAMING_SNAKE_CASE__ )
def _lowerCamelCase ( self ) -> Union[str, Any]:
lowerCamelCase__ = Accelerator()
lowerCamelCase__ = GradientState()
assert state.num_steps == 1
lowerCamelCase__ = 4
assert state.num_steps == 4
assert state.sync_gradients is True
lowerCamelCase__ = False
assert state.sync_gradients is False
GradientState._reset_state()
def _lowerCamelCase ( self ) -> str:
lowerCamelCase__ = Accelerator()
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = create_components()
(
(
lowerCamelCase__
) , (
lowerCamelCase__
) , (
lowerCamelCase__
) , (
lowerCamelCase__
) , (
lowerCamelCase__
) ,
) = accelerator.prepare(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
self.assertTrue(prepared_model in accelerator._models )
self.assertTrue(prepared_optimizer in accelerator._optimizers )
self.assertTrue(prepared_scheduler in accelerator._schedulers )
self.assertTrue(prepared_train_dl in accelerator._dataloaders )
self.assertTrue(prepared_valid_dl in accelerator._dataloaders )
def _lowerCamelCase ( self ) -> Union[str, Any]:
lowerCamelCase__ = Accelerator()
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = create_components()
accelerator.prepare(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
accelerator.free_memory()
self.assertTrue(len(accelerator._models ) == 0 )
self.assertTrue(len(accelerator._optimizers ) == 0 )
self.assertTrue(len(accelerator._schedulers ) == 0 )
self.assertTrue(len(accelerator._dataloaders ) == 0 )
def _lowerCamelCase ( self ) -> Dict:
PartialState._reset_state()
# Mock torch.cuda.set_device to avoid an exception as the device doesn't exist
def noop(*SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ):
pass
with patch("torch.cuda.set_device" , SCREAMING_SNAKE_CASE__ ), patch_environment(ACCELERATE_TORCH_DEVICE="cuda:64" ):
lowerCamelCase__ = Accelerator()
self.assertEqual(str(accelerator.state.device ) , "cuda:64" )
def _lowerCamelCase ( self ) -> Optional[Any]:
lowerCamelCase__ = Accelerator()
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = create_components()
accelerator.prepare(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = get_signature(SCREAMING_SNAKE_CASE__ )
with tempfile.TemporaryDirectory() as tmpdirname:
accelerator.save_state(SCREAMING_SNAKE_CASE__ )
# make sure random weights don't match
load_random_weights(SCREAMING_SNAKE_CASE__ )
self.assertTrue(abs(model_signature - get_signature(SCREAMING_SNAKE_CASE__ ) ) > 1e-3 )
# make sure loaded weights match
accelerator.load_state(SCREAMING_SNAKE_CASE__ )
self.assertTrue(abs(model_signature - get_signature(SCREAMING_SNAKE_CASE__ ) ) < 1e-3 )
def _lowerCamelCase ( self ) -> Any:
lowerCamelCase__ = Accelerator()
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = create_components()
accelerator.prepare(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = get_signature(SCREAMING_SNAKE_CASE__ )
# saving hook
def save_config(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
lowerCamelCase__ = {"class_name": models[0].__class__.__name__}
with open(os.path.join(SCREAMING_SNAKE_CASE__ , "data.json" ) , "w" ) as f:
json.dump(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
# loading hook
def load_config(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
with open(os.path.join(SCREAMING_SNAKE_CASE__ , "data.json" ) , "r" ) as f:
lowerCamelCase__ = json.load(SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = config["class_name"]
lowerCamelCase__ = accelerator.register_save_state_pre_hook(SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = accelerator.register_load_state_pre_hook(SCREAMING_SNAKE_CASE__ )
with tempfile.TemporaryDirectory() as tmpdirname:
accelerator.save_state(SCREAMING_SNAKE_CASE__ )
# make sure random weights don't match with hooks
load_random_weights(SCREAMING_SNAKE_CASE__ )
self.assertTrue(abs(model_signature - get_signature(SCREAMING_SNAKE_CASE__ ) ) > 1e-3 )
# random class name to verify correct one is loaded
lowerCamelCase__ = "random"
# make sure loaded weights match with hooks
accelerator.load_state(SCREAMING_SNAKE_CASE__ )
self.assertTrue(abs(model_signature - get_signature(SCREAMING_SNAKE_CASE__ ) ) < 1e-3 )
# mode.class_name is loaded from config
self.assertTrue(model.class_name == model.__class__.__name__ )
# remove hooks
save_hook.remove()
load_hook.remove()
with tempfile.TemporaryDirectory() as tmpdirname:
accelerator.save_state(SCREAMING_SNAKE_CASE__ )
# make sure random weights don't match with hooks removed
load_random_weights(SCREAMING_SNAKE_CASE__ )
self.assertTrue(abs(model_signature - get_signature(SCREAMING_SNAKE_CASE__ ) ) > 1e-3 )
# random class name to verify correct one is loaded
lowerCamelCase__ = "random"
# make sure loaded weights match with hooks removed
accelerator.load_state(SCREAMING_SNAKE_CASE__ )
self.assertTrue(abs(model_signature - get_signature(SCREAMING_SNAKE_CASE__ ) ) < 1e-3 )
# mode.class_name is NOT loaded from config
self.assertTrue(model.class_name != model.__class__.__name__ )
def _lowerCamelCase ( self ) -> Union[str, Any]:
lowerCamelCase__ = Accelerator()
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = create_components()
lowerCamelCase__ = None
# This should work
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = accelerator.prepare(
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
self.assertTrue(dummy_obj is None )
def _lowerCamelCase ( self ) -> List[str]:
lowerCamelCase__ = Accelerator()
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = create_components()
lowerCamelCase__ = [1, 2, 3]
# This should work
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = accelerator.prepare(
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
self.assertEqual(
getattr(SCREAMING_SNAKE_CASE__ , "_is_accelerate_prepared" , SCREAMING_SNAKE_CASE__ ) , SCREAMING_SNAKE_CASE__ , "Dummy object should have `_is_accelerate_prepared` set to `True`" , )
self.assertEqual(
getattr(SCREAMING_SNAKE_CASE__ , "_is_accelerate_prepared" , SCREAMING_SNAKE_CASE__ ) , SCREAMING_SNAKE_CASE__ , "Model is missing `_is_accelerator_prepared` or is set to `False`" , )
self.assertEqual(
getattr(SCREAMING_SNAKE_CASE__ , "_is_accelerate_prepared" , SCREAMING_SNAKE_CASE__ ) , SCREAMING_SNAKE_CASE__ , "Optimizer is missing `_is_accelerator_prepared` or is set to `False`" , )
self.assertEqual(
getattr(SCREAMING_SNAKE_CASE__ , "_is_accelerate_prepared" , SCREAMING_SNAKE_CASE__ ) , SCREAMING_SNAKE_CASE__ , "Scheduler is missing `_is_accelerator_prepared` or is set to `False`" , )
self.assertEqual(
getattr(SCREAMING_SNAKE_CASE__ , "_is_accelerate_prepared" , SCREAMING_SNAKE_CASE__ ) , SCREAMING_SNAKE_CASE__ , "Train Dataloader is missing `_is_accelerator_prepared` or is set to `False`" , )
self.assertEqual(
getattr(SCREAMING_SNAKE_CASE__ , "_is_accelerate_prepared" , SCREAMING_SNAKE_CASE__ ) , SCREAMING_SNAKE_CASE__ , "Valid Dataloader is missing `_is_accelerator_prepared` or is set to `False`" , )
@slow
@require_bnb
def _lowerCamelCase ( self ) -> str:
from transformers import AutoModelForCausalLM
lowerCamelCase__ = AutoModelForCausalLM.from_pretrained(
"EleutherAI/gpt-neo-125m" , load_in_abit=SCREAMING_SNAKE_CASE__ , device_map={"": 0} , )
lowerCamelCase__ = Accelerator()
# This should work
lowerCamelCase__ = accelerator.prepare(SCREAMING_SNAKE_CASE__ )
@slow
@require_bnb
def _lowerCamelCase ( self ) -> Tuple:
from transformers import AutoModelForCausalLM
lowerCamelCase__ = Accelerator()
with init_empty_weights():
lowerCamelCase__ = AutoModelForCausalLM.from_pretrained(
"EleutherAI/gpt-neo-125m" , )
model.tie_weights()
lowerCamelCase__ = infer_auto_device_map(SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = "cpu"
lowerCamelCase__ = AutoModelForCausalLM.from_pretrained(
"EleutherAI/gpt-neo-125m" , device_map=SCREAMING_SNAKE_CASE__ , load_in_abit=SCREAMING_SNAKE_CASE__ , llm_inta_enable_fpaa_cpu_offload=SCREAMING_SNAKE_CASE__ )
# This should not work and get value error
with self.assertRaises(SCREAMING_SNAKE_CASE__ ):
lowerCamelCase__ = accelerator.prepare(SCREAMING_SNAKE_CASE__ )
@slow
@require_bnb
@require_multi_gpu
def _lowerCamelCase ( self ) -> Dict:
from transformers import AutoModelForCausalLM
lowerCamelCase__ = {"distributed_type": DistributedType.MULTI_GPU}
with init_empty_weights():
lowerCamelCase__ = AutoModelForCausalLM.from_pretrained(
"EleutherAI/gpt-neo-125m" , )
model.tie_weights()
lowerCamelCase__ = infer_auto_device_map(SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = 1
lowerCamelCase__ = AutoModelForCausalLM.from_pretrained(
"EleutherAI/gpt-neo-125m" , load_in_abit=SCREAMING_SNAKE_CASE__ , device_map=SCREAMING_SNAKE_CASE__ , )
lowerCamelCase__ = Accelerator()
# This should not work and get value error
with self.assertRaises(SCREAMING_SNAKE_CASE__ ):
lowerCamelCase__ = accelerator.prepare(SCREAMING_SNAKE_CASE__ )
PartialState._reset_state()
@slow
@require_bnb
@require_multi_gpu
def _lowerCamelCase ( self ) -> List[Any]:
from transformers import AutoModelForCausalLM
with init_empty_weights():
lowerCamelCase__ = AutoModelForCausalLM.from_pretrained(
"EleutherAI/gpt-neo-125m" , )
lowerCamelCase__ = infer_auto_device_map(SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = 1
lowerCamelCase__ = AutoModelForCausalLM.from_pretrained(
"EleutherAI/gpt-neo-125m" , load_in_abit=SCREAMING_SNAKE_CASE__ , device_map=SCREAMING_SNAKE_CASE__ , )
lowerCamelCase__ = Accelerator()
# This should work
lowerCamelCase__ = accelerator.prepare(SCREAMING_SNAKE_CASE__ )
@require_cuda
def _lowerCamelCase ( self ) -> List[str]:
lowerCamelCase__ = torch.nn.Linear(10 , 10 )
lowerCamelCase__ = torch.optim.SGD(model.parameters() , lr=0.01 )
lowerCamelCase__ = Accelerator(cpu=SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = accelerator.prepare(SCREAMING_SNAKE_CASE__ )
| 274
| 0
|
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, MBartTokenizer, MBartTokenizerFast, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
a_ = get_tests_dir('fixtures/test_sentencepiece.model')
if is_torch_available():
from transformers.models.mbart.modeling_mbart import shift_tokens_right
a_ = 250_004
a_ = 250_020
@require_sentencepiece
@require_tokenizers
class _lowercase ( snake_case_ , unittest.TestCase ):
lowercase = MBartTokenizer
lowercase = MBartTokenizerFast
lowercase = True
lowercase = True
def SCREAMING_SNAKE_CASE__ ( self : str ) -> int:
"""simple docstring"""
super().setUp()
# We have a SentencePiece fixture for testing
UpperCamelCase_ : Tuple = MBartTokenizer(snake_case , keep_accents=snake_case )
tokenizer.save_pretrained(self.tmpdirname )
def SCREAMING_SNAKE_CASE__ ( self : str ) -> Dict:
"""simple docstring"""
UpperCamelCase_ : Optional[int] = MBartTokenizer(snake_case , keep_accents=snake_case )
UpperCamelCase_ : Any = tokenizer.tokenize('This is a test' )
self.assertListEqual(snake_case , ['▁This', '▁is', '▁a', '▁t', 'est'] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(snake_case ) , [value + tokenizer.fairseq_offset for value in [2_8_5, 4_6, 1_0, 1_7_0, 3_8_2]] , )
UpperCamelCase_ : Optional[Any] = tokenizer.tokenize('I was born in 92000, and this is falsé.' )
self.assertListEqual(
snake_case , [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'9',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'é',
'.',
] , )
UpperCamelCase_ : Tuple = tokenizer.convert_tokens_to_ids(snake_case )
self.assertListEqual(
snake_case , [
value + tokenizer.fairseq_offset
for value in [8, 2_1, 8_4, 5_5, 2_4, 1_9, 7, 2, 6_0_2, 3_4_7, 3_4_7, 3_4_7, 3, 1_2, 6_6, 4_6, 7_2, 8_0, 6, 2, 4]
# ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^
] , )
UpperCamelCase_ : Optional[int] = tokenizer.convert_ids_to_tokens(snake_case )
self.assertListEqual(
snake_case , [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'<unk>',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'<unk>',
'.',
] , )
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
if not self.test_slow_tokenizer:
# as we don't have a slow version, we can't compare the outputs between slow and fast versions
return
UpperCamelCase_ : Optional[int] = (self.rust_tokenizer_class, 'hf-internal-testing/tiny-random-mbart', {})
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})" ):
UpperCamelCase_ : Union[str, Any] = self.rust_tokenizer_class.from_pretrained(snake_case , **snake_case )
UpperCamelCase_ : List[Any] = self.tokenizer_class.from_pretrained(snake_case , **snake_case )
UpperCamelCase_ : Dict = tempfile.mkdtemp()
UpperCamelCase_ : List[str] = tokenizer_r.save_pretrained(snake_case )
UpperCamelCase_ : List[str] = tokenizer_p.save_pretrained(snake_case )
# Checks it save with the same files + the tokenizer.json file for the fast one
self.assertTrue(any('tokenizer.json' in f for f in tokenizer_r_files ) )
UpperCamelCase_ : Optional[int] = tuple(f for f in tokenizer_r_files if 'tokenizer.json' not in f )
self.assertSequenceEqual(snake_case , snake_case )
# Checks everything loads correctly in the same way
UpperCamelCase_ : List[str] = tokenizer_r.from_pretrained(snake_case )
UpperCamelCase_ : str = tokenizer_p.from_pretrained(snake_case )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(snake_case , snake_case ) )
# self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
# self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))
shutil.rmtree(snake_case )
# Save tokenizer rust, legacy_format=True
UpperCamelCase_ : Optional[int] = tempfile.mkdtemp()
UpperCamelCase_ : Union[str, Any] = tokenizer_r.save_pretrained(snake_case , legacy_format=snake_case )
UpperCamelCase_ : Dict = tokenizer_p.save_pretrained(snake_case )
# Checks it save with the same files
self.assertSequenceEqual(snake_case , snake_case )
# Checks everything loads correctly in the same way
UpperCamelCase_ : List[Any] = tokenizer_r.from_pretrained(snake_case )
UpperCamelCase_ : List[str] = tokenizer_p.from_pretrained(snake_case )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(snake_case , snake_case ) )
shutil.rmtree(snake_case )
# Save tokenizer rust, legacy_format=False
UpperCamelCase_ : Optional[Any] = tempfile.mkdtemp()
UpperCamelCase_ : List[Any] = tokenizer_r.save_pretrained(snake_case , legacy_format=snake_case )
UpperCamelCase_ : Union[str, Any] = tokenizer_p.save_pretrained(snake_case )
# Checks it saved the tokenizer.json file
self.assertTrue(any('tokenizer.json' in f for f in tokenizer_r_files ) )
# Checks everything loads correctly in the same way
UpperCamelCase_ : Optional[int] = tokenizer_r.from_pretrained(snake_case )
UpperCamelCase_ : Dict = tokenizer_p.from_pretrained(snake_case )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(snake_case , snake_case ) )
shutil.rmtree(snake_case )
@require_torch
@require_sentencepiece
@require_tokenizers
class _lowercase ( unittest.TestCase ):
lowercase = 'facebook/mbart-large-en-ro'
lowercase = [
' UN Chief Says There Is No Military Solution in Syria',
' Secretary-General Ban Ki-moon says his response to Russia\'s stepped up military support for Syria is that "there is no military solution" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.',
]
lowercase = [
'Şeful ONU declară că nu există o soluţie militară în Siria',
'Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei'
' pentru Siria este că "nu există o soluţie militară" la conflictul de aproape cinci ani şi că noi arme nu vor'
' face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.',
]
lowercase = [8_2_7_4, 1_2_7_8_7_3, 2_5_9_1_6, 7, 8_6_2_2, 2_0_7_1, 4_3_8, 6_7_4_8_5, 5_3, 1_8_7_8_9_5, 2_3, 5_1_7_1_2, 2, EN_CODE]
@classmethod
def SCREAMING_SNAKE_CASE__ ( cls : Optional[Any] ) -> str:
"""simple docstring"""
UpperCamelCase_ : MBartTokenizer = MBartTokenizer.from_pretrained(
cls.checkpoint_name , src_lang='en_XX' , tgt_lang='ro_RO' )
UpperCamelCase_ : Any = 1
return cls
def SCREAMING_SNAKE_CASE__ ( self : List[str] ) -> Any:
"""simple docstring"""
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['ar_AR'] , 2_5_0_0_0_1 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['en_EN'] , 2_5_0_0_0_4 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['ro_RO'] , 2_5_0_0_2_0 )
def SCREAMING_SNAKE_CASE__ ( self : str ) -> Optional[Any]:
"""simple docstring"""
UpperCamelCase_ : Tuple = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
self.assertListEqual(self.expected_src_tokens , snake_case )
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> int:
"""simple docstring"""
self.assertIn(snake_case , self.tokenizer.all_special_ids )
UpperCamelCase_ : List[str] = [RO_CODE, 8_8_4, 9_0_1_9, 9_6, 9, 9_1_6, 8_6_7_9_2, 3_6, 1_8_7_4_3, 1_5_5_9_6, 5, 2]
UpperCamelCase_ : int = self.tokenizer.decode(snake_case , skip_special_tokens=snake_case )
UpperCamelCase_ : List[str] = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=snake_case )
self.assertEqual(snake_case , snake_case )
self.assertNotIn(self.tokenizer.eos_token , snake_case )
def SCREAMING_SNAKE_CASE__ ( self : Any ) -> Tuple:
"""simple docstring"""
UpperCamelCase_ : int = ['this is gunna be a long sentence ' * 2_0]
assert isinstance(src_text[0] , snake_case )
UpperCamelCase_ : Optional[Any] = 1_0
UpperCamelCase_ : int = self.tokenizer(snake_case , max_length=snake_case , truncation=snake_case ).input_ids[0]
self.assertEqual(ids[-2] , 2 )
self.assertEqual(ids[-1] , snake_case )
self.assertEqual(len(snake_case ) , snake_case )
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> Optional[Any]:
"""simple docstring"""
self.assertListEqual(self.tokenizer.convert_tokens_to_ids(['<mask>', 'ar_AR'] ) , [2_5_0_0_2_6, 2_5_0_0_0_1] )
def SCREAMING_SNAKE_CASE__ ( self : List[Any] ) -> Union[str, Any]:
"""simple docstring"""
UpperCamelCase_ : str = tempfile.mkdtemp()
UpperCamelCase_ : Union[str, Any] = self.tokenizer.fairseq_tokens_to_ids
self.tokenizer.save_pretrained(snake_case )
UpperCamelCase_ : Optional[Any] = MBartTokenizer.from_pretrained(snake_case )
self.assertDictEqual(new_tok.fairseq_tokens_to_ids , snake_case )
@require_torch
def SCREAMING_SNAKE_CASE__ ( self : List[Any] ) -> List[str]:
"""simple docstring"""
UpperCamelCase_ : List[str] = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=snake_case , return_tensors='pt' )
UpperCamelCase_ : str = shift_tokens_right(batch['labels'] , self.tokenizer.pad_token_id )
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
assert batch.input_ids[1][-2:].tolist() == [2, EN_CODE]
assert batch.decoder_input_ids[1][0].tolist() == RO_CODE
assert batch.decoder_input_ids[1][-1] == 2
assert batch.labels[1][-2:].tolist() == [2, RO_CODE]
@require_torch
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> Optional[int]:
"""simple docstring"""
UpperCamelCase_ : Optional[int] = self.tokenizer(
self.src_text , text_target=self.tgt_text , padding=snake_case , truncation=snake_case , max_length=len(self.expected_src_tokens ) , return_tensors='pt' , )
UpperCamelCase_ : List[Any] = shift_tokens_right(batch['labels'] , self.tokenizer.pad_token_id )
self.assertIsInstance(snake_case , snake_case )
self.assertEqual((2, 1_4) , batch.input_ids.shape )
self.assertEqual((2, 1_4) , batch.attention_mask.shape )
UpperCamelCase_ : List[str] = batch.input_ids.tolist()[0]
self.assertListEqual(self.expected_src_tokens , snake_case )
self.assertEqual(2 , batch.decoder_input_ids[0, -1] ) # EOS
# Test that special tokens are reset
self.assertEqual(self.tokenizer.prefix_tokens , [] )
self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id, EN_CODE] )
def SCREAMING_SNAKE_CASE__ ( self : List[str] ) -> int:
"""simple docstring"""
UpperCamelCase_ : Optional[int] = self.tokenizer(self.src_text , padding=snake_case , truncation=snake_case , max_length=3 , return_tensors='pt' )
UpperCamelCase_ : str = self.tokenizer(
text_target=self.tgt_text , padding=snake_case , truncation=snake_case , max_length=1_0 , return_tensors='pt' )
UpperCamelCase_ : Tuple = targets['input_ids']
UpperCamelCase_ : List[Any] = shift_tokens_right(snake_case , self.tokenizer.pad_token_id )
self.assertEqual(batch.input_ids.shape[1] , 3 )
self.assertEqual(batch.decoder_input_ids.shape[1] , 1_0 )
@require_torch
def SCREAMING_SNAKE_CASE__ ( self : Any ) -> Union[str, Any]:
"""simple docstring"""
UpperCamelCase_ : Tuple = self.tokenizer._build_translation_inputs(
'A test' , return_tensors='pt' , src_lang='en_XX' , tgt_lang='ar_AR' )
self.assertEqual(
nested_simplify(snake_case ) , {
# A, test, EOS, en_XX
'input_ids': [[6_2, 3_0_3_4, 2, 2_5_0_0_0_4]],
'attention_mask': [[1, 1, 1, 1]],
# ar_AR
'forced_bos_token_id': 2_5_0_0_0_1,
} , )
| 417
|
import argparse
import requests
import torch
from PIL import Image
from torchvision.transforms import Compose, Normalize, Resize, ToTensor
from transformers import SwinaSRConfig, SwinaSRForImageSuperResolution, SwinaSRImageProcessor
def __lowercase ( lowerCamelCase : str ):
UpperCamelCase_ : Any = SwinaSRConfig()
if "Swin2SR_ClassicalSR_X4_64" in checkpoint_url:
UpperCamelCase_ : Union[str, Any] = 4
elif "Swin2SR_CompressedSR_X4_48" in checkpoint_url:
UpperCamelCase_ : Union[str, Any] = 4
UpperCamelCase_ : Union[str, Any] = 48
UpperCamelCase_ : List[Any] = 'pixelshuffle_aux'
elif "Swin2SR_Lightweight_X2_64" in checkpoint_url:
UpperCamelCase_ : Union[str, Any] = [6, 6, 6, 6]
UpperCamelCase_ : Union[str, Any] = 60
UpperCamelCase_ : List[str] = [6, 6, 6, 6]
UpperCamelCase_ : Tuple = 'pixelshuffledirect'
elif "Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR" in checkpoint_url:
UpperCamelCase_ : Optional[int] = 4
UpperCamelCase_ : Dict = 'nearest+conv'
elif "Swin2SR_Jpeg_dynamic" in checkpoint_url:
UpperCamelCase_ : Any = 1
UpperCamelCase_ : List[Any] = 1
UpperCamelCase_ : List[str] = 126
UpperCamelCase_ : Dict = 7
UpperCamelCase_ : int = 2_5_5.0
UpperCamelCase_ : str = ''
return config
def __lowercase ( lowerCamelCase : Optional[int] , lowerCamelCase : Optional[Any] ):
if "patch_embed.proj" in name and "layers" not in name:
UpperCamelCase_ : Any = name.replace('patch_embed.proj' , 'embeddings.patch_embeddings.projection' )
if "patch_embed.norm" in name:
UpperCamelCase_ : Tuple = name.replace('patch_embed.norm' , 'embeddings.patch_embeddings.layernorm' )
if "layers" in name:
UpperCamelCase_ : List[str] = name.replace('layers' , 'encoder.stages' )
if "residual_group.blocks" in name:
UpperCamelCase_ : Optional[Any] = name.replace('residual_group.blocks' , 'layers' )
if "attn.proj" in name:
UpperCamelCase_ : List[Any] = name.replace('attn.proj' , 'attention.output.dense' )
if "attn" in name:
UpperCamelCase_ : Dict = name.replace('attn' , 'attention.self' )
if "norm1" in name:
UpperCamelCase_ : List[str] = name.replace('norm1' , 'layernorm_before' )
if "norm2" in name:
UpperCamelCase_ : Optional[Any] = name.replace('norm2' , 'layernorm_after' )
if "mlp.fc1" in name:
UpperCamelCase_ : List[str] = name.replace('mlp.fc1' , 'intermediate.dense' )
if "mlp.fc2" in name:
UpperCamelCase_ : Optional[Any] = name.replace('mlp.fc2' , 'output.dense' )
if "q_bias" in name:
UpperCamelCase_ : List[str] = name.replace('q_bias' , 'query.bias' )
if "k_bias" in name:
UpperCamelCase_ : str = name.replace('k_bias' , 'key.bias' )
if "v_bias" in name:
UpperCamelCase_ : Tuple = name.replace('v_bias' , 'value.bias' )
if "cpb_mlp" in name:
UpperCamelCase_ : Optional[int] = name.replace('cpb_mlp' , 'continuous_position_bias_mlp' )
if "patch_embed.proj" in name:
UpperCamelCase_ : Union[str, Any] = name.replace('patch_embed.proj' , 'patch_embed.projection' )
if name == "norm.weight":
UpperCamelCase_ : Dict = 'layernorm.weight'
if name == "norm.bias":
UpperCamelCase_ : List[str] = 'layernorm.bias'
if "conv_first" in name:
UpperCamelCase_ : Any = name.replace('conv_first' , 'first_convolution' )
if (
"upsample" in name
or "conv_before_upsample" in name
or "conv_bicubic" in name
or "conv_up" in name
or "conv_hr" in name
or "conv_last" in name
or "aux" in name
):
# heads
if "conv_last" in name:
UpperCamelCase_ : int = name.replace('conv_last' , 'final_convolution' )
if config.upsampler in ["pixelshuffle", "pixelshuffle_aux", "nearest+conv"]:
if "conv_before_upsample.0" in name:
UpperCamelCase_ : Union[str, Any] = name.replace('conv_before_upsample.0' , 'conv_before_upsample' )
if "upsample.0" in name:
UpperCamelCase_ : Optional[Any] = name.replace('upsample.0' , 'upsample.convolution_0' )
if "upsample.2" in name:
UpperCamelCase_ : Optional[Any] = name.replace('upsample.2' , 'upsample.convolution_1' )
UpperCamelCase_ : Dict = 'upsample.' + name
elif config.upsampler == "pixelshuffledirect":
UpperCamelCase_ : List[Any] = name.replace('upsample.0.weight' , 'upsample.conv.weight' )
UpperCamelCase_ : List[Any] = name.replace('upsample.0.bias' , 'upsample.conv.bias' )
else:
pass
else:
UpperCamelCase_ : Union[str, Any] = 'swin2sr.' + name
return name
def __lowercase ( lowerCamelCase : Dict , lowerCamelCase : str ):
for key in orig_state_dict.copy().keys():
UpperCamelCase_ : Any = orig_state_dict.pop(lowerCamelCase )
if "qkv" in key:
UpperCamelCase_ : List[str] = key.split('.' )
UpperCamelCase_ : Any = int(key_split[1] )
UpperCamelCase_ : Union[str, Any] = int(key_split[4] )
UpperCamelCase_ : str = config.embed_dim
if "weight" in key:
UpperCamelCase_ : str = val[:dim, :]
UpperCamelCase_ : Optional[int] = val[dim : dim * 2, :]
UpperCamelCase_ : Union[str, Any] = val[-dim:, :]
else:
UpperCamelCase_ : Dict = val[:dim]
UpperCamelCase_ : Tuple = val[dim : dim * 2]
UpperCamelCase_ : str = val[-dim:]
pass
else:
UpperCamelCase_ : Tuple = val
return orig_state_dict
def __lowercase ( lowerCamelCase : int , lowerCamelCase : int , lowerCamelCase : str ):
UpperCamelCase_ : int = get_config(lowerCamelCase )
UpperCamelCase_ : Dict = SwinaSRForImageSuperResolution(lowerCamelCase )
model.eval()
UpperCamelCase_ : Any = torch.hub.load_state_dict_from_url(lowerCamelCase , map_location='cpu' )
UpperCamelCase_ : str = convert_state_dict(lowerCamelCase , lowerCamelCase )
UpperCamelCase_, UpperCamelCase_ : List[Any] = model.load_state_dict(lowerCamelCase , strict=lowerCamelCase )
if len(lowerCamelCase ) > 0:
raise ValueError('Missing keys when converting: {}'.format(lowerCamelCase ) )
for key in unexpected_keys:
if not ("relative_position_index" in key or "relative_coords_table" in key or "self_mask" in key):
raise ValueError(F"Unexpected key {key} in state_dict" )
# verify values
UpperCamelCase_ : Tuple = 'https://github.com/mv-lab/swin2sr/blob/main/testsets/real-inputs/shanghai.jpg?raw=true'
UpperCamelCase_ : List[Any] = Image.open(requests.get(lowerCamelCase , stream=lowerCamelCase ).raw ).convert('RGB' )
UpperCamelCase_ : Dict = SwinaSRImageProcessor()
# pixel_values = processor(image, return_tensors="pt").pixel_values
UpperCamelCase_ : Union[str, Any] = 126 if 'Jpeg' in checkpoint_url else 256
UpperCamelCase_ : str = Compose(
[
Resize((image_size, image_size) ),
ToTensor(),
Normalize(mean=[0.4_8_5, 0.4_5_6, 0.4_0_6] , std=[0.2_2_9, 0.2_2_4, 0.2_2_5] ),
] )
UpperCamelCase_ : Union[str, Any] = transforms(lowerCamelCase ).unsqueeze(0 )
if config.num_channels == 1:
UpperCamelCase_ : List[str] = pixel_values[:, 0, :, :].unsqueeze(1 )
UpperCamelCase_ : Optional[Any] = model(lowerCamelCase )
# assert values
if "Swin2SR_ClassicalSR_X2_64" in checkpoint_url:
UpperCamelCase_ : int = torch.Size([1, 3, 512, 512] )
UpperCamelCase_ : List[str] = torch.tensor(
[[-0.7_0_8_7, -0.7_1_3_8, -0.6_7_2_1], [-0.8_3_4_0, -0.8_0_9_5, -0.7_2_9_8], [-0.9_1_4_9, -0.8_4_1_4, -0.7_9_4_0]] )
elif "Swin2SR_ClassicalSR_X4_64" in checkpoint_url:
UpperCamelCase_ : str = torch.Size([1, 3, 1024, 1024] )
UpperCamelCase_ : Optional[Any] = torch.tensor(
[[-0.7_7_7_5, -0.8_1_0_5, -0.8_9_3_3], [-0.7_7_6_4, -0.8_3_5_6, -0.9_2_2_5], [-0.7_9_7_6, -0.8_6_8_6, -0.9_5_7_9]] )
elif "Swin2SR_CompressedSR_X4_48" in checkpoint_url:
# TODO values didn't match exactly here
UpperCamelCase_ : List[str] = torch.Size([1, 3, 1024, 1024] )
UpperCamelCase_ : Tuple = torch.tensor(
[[-0.8_0_3_5, -0.7_5_0_4, -0.7_4_9_1], [-0.8_5_3_8, -0.8_1_2_4, -0.7_7_8_2], [-0.8_8_0_4, -0.8_6_5_1, -0.8_4_9_3]] )
elif "Swin2SR_Lightweight_X2_64" in checkpoint_url:
UpperCamelCase_ : Any = torch.Size([1, 3, 512, 512] )
UpperCamelCase_ : Tuple = torch.tensor(
[[-0.7_6_6_9, -0.8_6_6_2, -0.8_7_6_7], [-0.8_8_1_0, -0.9_9_6_2, -0.9_8_2_0], [-0.9_3_4_0, -1.0_3_2_2, -1.1_1_4_9]] )
elif "Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR" in checkpoint_url:
UpperCamelCase_ : Optional[Any] = torch.Size([1, 3, 1024, 1024] )
UpperCamelCase_ : Tuple = torch.tensor(
[[-0.5_2_3_8, -0.5_5_5_7, -0.6_3_2_1], [-0.6_0_1_6, -0.5_9_0_3, -0.6_3_9_1], [-0.6_2_4_4, -0.6_3_3_4, -0.6_8_8_9]] )
assert (
outputs.reconstruction.shape == expected_shape
), F"Shape of reconstruction should be {expected_shape}, but is {outputs.reconstruction.shape}"
assert torch.allclose(outputs.reconstruction[0, 0, :3, :3] , lowerCamelCase , atol=1e-3 )
print('Looks ok!' )
UpperCamelCase_ : List[Any] = {
'https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth': (
'swin2SR-classical-sr-x2-64'
),
'https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X4_64.pth': (
'swin2SR-classical-sr-x4-64'
),
'https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_CompressedSR_X4_48.pth': (
'swin2SR-compressed-sr-x4-48'
),
'https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_Lightweight_X2_64.pth': (
'swin2SR-lightweight-x2-64'
),
'https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR.pth': (
'swin2SR-realworld-sr-x4-64-bsrgan-psnr'
),
}
UpperCamelCase_ : List[str] = url_to_name[checkpoint_url]
if pytorch_dump_folder_path is not None:
print(F"Saving model {model_name} to {pytorch_dump_folder_path}" )
model.save_pretrained(lowerCamelCase )
print(F"Saving image processor to {pytorch_dump_folder_path}" )
processor.save_pretrained(lowerCamelCase )
if push_to_hub:
model.push_to_hub(F"caidas/{model_name}" )
processor.push_to_hub(F"caidas/{model_name}" )
if __name__ == "__main__":
a_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--checkpoint_url',
default='https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth',
type=str,
help='URL of the original Swin2SR checkpoint you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument('--push_to_hub', action='store_true', help='Whether to push the converted model to the hub.')
a_ = parser.parse_args()
convert_swinasr_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
| 417
| 1
|
'''simple docstring'''
import unittest
from parameterized import parameterized
from transformers import AutoTokenizer, GPTNeoXConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
GPTNeoXForCausalLM,
GPTNeoXForQuestionAnswering,
GPTNeoXForSequenceClassification,
GPTNeoXForTokenClassification,
GPTNeoXModel,
)
class UpperCamelCase__ :
'''simple docstring'''
def __init__( self ,_lowerCAmelCase ,_lowerCAmelCase=13 ,_lowerCAmelCase=7 ,_lowerCAmelCase=True ,_lowerCAmelCase=True ,_lowerCAmelCase=True ,_lowerCAmelCase=True ,_lowerCAmelCase=99 ,_lowerCAmelCase=64 ,_lowerCAmelCase=5 ,_lowerCAmelCase=4 ,_lowerCAmelCase=37 ,_lowerCAmelCase="gelu" ,_lowerCAmelCase=0.1 ,_lowerCAmelCase=0.1 ,_lowerCAmelCase=5_12 ,_lowerCAmelCase=16 ,_lowerCAmelCase=2 ,_lowerCAmelCase=0.02 ,_lowerCAmelCase=3 ,_lowerCAmelCase=4 ,_lowerCAmelCase=None ,):
lowerCamelCase__ = parent
lowerCamelCase__ = batch_size
lowerCamelCase__ = seq_length
lowerCamelCase__ = is_training
lowerCamelCase__ = use_input_mask
lowerCamelCase__ = use_token_type_ids
lowerCamelCase__ = use_labels
lowerCamelCase__ = vocab_size
lowerCamelCase__ = hidden_size
lowerCamelCase__ = num_hidden_layers
lowerCamelCase__ = num_attention_heads
lowerCamelCase__ = intermediate_size
lowerCamelCase__ = hidden_act
lowerCamelCase__ = hidden_dropout_prob
lowerCamelCase__ = attention_probs_dropout_prob
lowerCamelCase__ = max_position_embeddings
lowerCamelCase__ = type_vocab_size
lowerCamelCase__ = type_sequence_label_size
lowerCamelCase__ = initializer_range
lowerCamelCase__ = num_labels
lowerCamelCase__ = num_choices
lowerCamelCase__ = scope
lowerCamelCase__ = vocab_size - 1
def UpperCamelCase_ ( self ):
lowerCamelCase__ = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
lowerCamelCase__ = None
if self.use_input_mask:
lowerCamelCase__ = random_attention_mask([self.batch_size, self.seq_length] )
lowerCamelCase__ = None
if self.use_labels:
lowerCamelCase__ = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels )
lowerCamelCase__ = self.get_config()
return config, input_ids, input_mask, token_labels
def UpperCamelCase_ ( self ):
return GPTNeoXConfig(
vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,type_vocab_size=self.type_vocab_size ,is_decoder=lowerCAmelCase__ ,initializer_range=self.initializer_range ,pad_token_id=self.pad_token_id ,)
def UpperCamelCase_ ( self ):
lowerCamelCase__ = self.prepare_config_and_inputs()
lowerCamelCase__ = True
return config, input_ids, input_mask, token_labels
def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ):
lowerCamelCase__ = GPTNeoXModel(config=lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
lowerCamelCase__ = model(lowerCAmelCase__ ,attention_mask=lowerCAmelCase__ )
lowerCamelCase__ = model(lowerCAmelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ):
lowerCamelCase__ = True
lowerCamelCase__ = GPTNeoXModel(lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
lowerCamelCase__ = model(lowerCAmelCase__ ,attention_mask=lowerCAmelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ):
lowerCamelCase__ = GPTNeoXForCausalLM(config=lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
lowerCamelCase__ = model(lowerCAmelCase__ ,attention_mask=lowerCAmelCase__ ,labels=lowerCAmelCase__ )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ):
lowerCamelCase__ = self.num_labels
lowerCamelCase__ = GPTNeoXForQuestionAnswering(lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
lowerCamelCase__ = model(lowerCAmelCase__ ,attention_mask=lowerCAmelCase__ )
self.parent.assertEqual(result.start_logits.shape ,(self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape ,(self.batch_size, self.seq_length) )
def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ):
lowerCamelCase__ = self.num_labels
lowerCamelCase__ = GPTNeoXForSequenceClassification(lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
lowerCamelCase__ = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
lowerCamelCase__ = model(lowerCAmelCase__ ,attention_mask=lowerCAmelCase__ ,labels=lowerCAmelCase__ )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) )
def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ):
lowerCamelCase__ = self.num_labels
lowerCamelCase__ = GPTNeoXForTokenClassification(lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
lowerCamelCase__ = model(lowerCAmelCase__ ,attention_mask=lowerCAmelCase__ ,labels=lowerCAmelCase__ )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.num_labels) )
def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ):
lowerCamelCase__ = True
lowerCamelCase__ = GPTNeoXForCausalLM(config=lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
# first forward pass
lowerCamelCase__ = model(lowerCAmelCase__ ,attention_mask=lowerCAmelCase__ ,use_cache=lowerCAmelCase__ )
lowerCamelCase__ = outputs.past_key_values
# create hypothetical multiple next token and extent to next_input_ids
lowerCamelCase__ = ids_tensor((self.batch_size, 3) ,config.vocab_size )
lowerCamelCase__ = ids_tensor((self.batch_size, 3) ,vocab_size=2 )
# append to next input_ids and
lowerCamelCase__ = torch.cat([input_ids, next_tokens] ,dim=-1 )
lowerCamelCase__ = torch.cat([input_mask, next_mask] ,dim=-1 )
lowerCamelCase__ = model(lowerCAmelCase__ ,attention_mask=lowerCAmelCase__ ,output_hidden_states=lowerCAmelCase__ )
lowerCamelCase__ = output_from_no_past["hidden_states"][0]
lowerCamelCase__ = model(
lowerCAmelCase__ ,attention_mask=lowerCAmelCase__ ,past_key_values=lowerCAmelCase__ ,output_hidden_states=lowerCAmelCase__ ,)["hidden_states"][0]
# select random slice
lowerCamelCase__ = ids_tensor((1,) ,output_from_past.shape[-1] ).item()
lowerCamelCase__ = output_from_no_past[:, -3:, random_slice_idx].detach()
lowerCamelCase__ = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(lowerCAmelCase__ ,lowerCAmelCase__ ,atol=1E-3 ) )
def UpperCamelCase_ ( self ):
lowerCamelCase__ = self.prepare_config_and_inputs()
lowerCamelCase__ = config_and_inputs
lowerCamelCase__ = {"input_ids": input_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_torch
class UpperCamelCase__ (a__ ,a__ ,a__ ,unittest.TestCase ):
'''simple docstring'''
_UpperCamelCase = (
(
GPTNeoXModel,
GPTNeoXForCausalLM,
GPTNeoXForQuestionAnswering,
GPTNeoXForSequenceClassification,
GPTNeoXForTokenClassification,
)
if is_torch_available()
else ()
)
_UpperCamelCase = (GPTNeoXForCausalLM,) if is_torch_available() else ()
_UpperCamelCase = (
{
"""feature-extraction""": GPTNeoXModel,
"""question-answering""": GPTNeoXForQuestionAnswering,
"""text-classification""": GPTNeoXForSequenceClassification,
"""text-generation""": GPTNeoXForCausalLM,
"""token-classification""": GPTNeoXForTokenClassification,
"""zero-shot""": GPTNeoXForSequenceClassification,
}
if is_torch_available()
else {}
)
_UpperCamelCase = False
_UpperCamelCase = False
_UpperCamelCase = False
_UpperCamelCase = False
def UpperCamelCase_ ( self ):
lowerCamelCase__ = GPTNeoXModelTester(self )
lowerCamelCase__ = ConfigTester(self ,config_class=lowerCAmelCase__ ,hidden_size=64 ,num_attention_heads=8 )
def UpperCamelCase_ ( self ):
self.config_tester.run_common_tests()
def UpperCamelCase_ ( self ):
lowerCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ )
def UpperCamelCase_ ( self ):
lowerCamelCase__ = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ )
def UpperCamelCase_ ( self ):
# This regression test was failing with PyTorch < 1.3
lowerCamelCase__ = self.model_tester.prepare_config_and_inputs_for_decoder()
lowerCamelCase__ = None
self.model_tester.create_and_check_model_as_decoder(lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ )
def UpperCamelCase_ ( self ):
lowerCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_decoder_model_past_large_inputs(lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ )
def UpperCamelCase_ ( self ):
lowerCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_causal_lm(*lowerCAmelCase__ )
def UpperCamelCase_ ( self ):
lowerCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*lowerCAmelCase__ )
def UpperCamelCase_ ( self ):
lowerCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*lowerCAmelCase__ )
def UpperCamelCase_ ( self ):
lowerCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*lowerCAmelCase__ )
@unittest.skip(reason="""Feed forward chunking is not implemented""" )
def UpperCamelCase_ ( self ):
pass
@parameterized.expand([("""linear""",), ("""dynamic""",)] )
def UpperCamelCase_ ( self ,_lowerCAmelCase ):
lowerCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
lowerCamelCase__ = ids_tensor([1, 10] ,config.vocab_size )
lowerCamelCase__ = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] ,config.vocab_size )
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
lowerCamelCase__ = GPTNeoXModel(lowerCAmelCase__ )
original_model.to(lowerCAmelCase__ )
original_model.eval()
lowerCamelCase__ = original_model(lowerCAmelCase__ ).last_hidden_state
lowerCamelCase__ = original_model(lowerCAmelCase__ ).last_hidden_state
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
lowerCamelCase__ = {"type": scaling_type, "factor": 10.0}
lowerCamelCase__ = GPTNeoXModel(lowerCAmelCase__ )
scaled_model.to(lowerCAmelCase__ )
scaled_model.eval()
lowerCamelCase__ = scaled_model(lowerCAmelCase__ ).last_hidden_state
lowerCamelCase__ = scaled_model(lowerCAmelCase__ ).last_hidden_state
# Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
# maximum sequence length, so the outputs for the short input should match.
if scaling_type == "dynamic":
self.assertTrue(torch.allclose(lowerCAmelCase__ ,lowerCAmelCase__ ,atol=1E-5 ) )
else:
self.assertFalse(torch.allclose(lowerCAmelCase__ ,lowerCAmelCase__ ,atol=1E-5 ) )
# The output should be different for long inputs
self.assertFalse(torch.allclose(lowerCAmelCase__ ,lowerCAmelCase__ ,atol=1E-5 ) )
@require_torch
class UpperCamelCase__ (unittest.TestCase ):
'''simple docstring'''
@slow
def UpperCamelCase_ ( self ):
lowerCamelCase__ = AutoTokenizer.from_pretrained("""EleutherAI/pythia-410m-deduped""" )
for checkpointing in [True, False]:
lowerCamelCase__ = GPTNeoXForCausalLM.from_pretrained("""EleutherAI/pythia-410m-deduped""" )
if checkpointing:
model.gradient_checkpointing_enable()
else:
model.gradient_checkpointing_disable()
model.to(lowerCAmelCase__ )
lowerCamelCase__ = tokenizer("""My favorite food is""" ,return_tensors="""pt""" ).to(lowerCAmelCase__ )
# The hub repo. is updated on 2023-04-04, resulting in poor outputs.
# See: https://github.com/huggingface/transformers/pull/24193
lowerCamelCase__ = "My favorite food is a good old-fashioned, old-fashioned, old-fashioned.\n\nI'm not sure"
lowerCamelCase__ = model.generate(**lowerCAmelCase__ ,do_sample=lowerCAmelCase__ ,max_new_tokens=20 )
lowerCamelCase__ = tokenizer.batch_decode(lowerCAmelCase__ )[0]
self.assertEqual(lowerCAmelCase__ ,lowerCAmelCase__ )
| 708
|
'''simple docstring'''
import unittest
from transformers import GPTSwaTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
UpperCamelCase : List[Any] = get_tests_dir('fixtures/test_sentencepiece_with_bytefallback.model')
@require_sentencepiece
@require_tokenizers
class UpperCamelCase__ (a ,unittest.TestCase ):
'''simple docstring'''
_UpperCamelCase = GPTSwaTokenizer
_UpperCamelCase = False
_UpperCamelCase = True
_UpperCamelCase = False
def UpperCamelCase_ ( self ):
super().setUp()
# We have a SentencePiece fixture for testing
lowerCamelCase__ = GPTSwaTokenizer(_lowerCAmelCase ,eos_token="""<unk>""" ,bos_token="""<unk>""" ,pad_token="""<unk>""" )
tokenizer.save_pretrained(self.tmpdirname )
def UpperCamelCase_ ( self ,_lowerCAmelCase ):
lowerCamelCase__ = """This is a test"""
lowerCamelCase__ = """This is a test"""
return input_text, output_text
def UpperCamelCase_ ( self ):
lowerCamelCase__ = """<s>"""
lowerCamelCase__ = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(_lowerCAmelCase ) ,_lowerCAmelCase )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(_lowerCAmelCase ) ,_lowerCAmelCase )
def UpperCamelCase_ ( self ):
lowerCamelCase__ = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] ,"""<unk>""" )
self.assertEqual(vocab_keys[1] ,"""<s>""" )
self.assertEqual(vocab_keys[-1] ,"""j""" )
self.assertEqual(len(_lowerCAmelCase ) ,20_00 )
def UpperCamelCase_ ( self ):
self.assertEqual(self.get_tokenizer().vocab_size ,20_00 )
def UpperCamelCase_ ( self ):
lowerCamelCase__ = GPTSwaTokenizer(_lowerCAmelCase )
lowerCamelCase__ = tokenizer.tokenize("""This is a test""" )
self.assertListEqual(_lowerCAmelCase ,["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(_lowerCAmelCase ) ,[4_65, 2_87, 2_65, 6_31, 8_42] )
lowerCamelCase__ = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
# fmt: off
self.assertListEqual(
_lowerCAmelCase ,["""▁I""", """▁was""", """▁bor""", """n""", """▁in""", """▁""", """<0x39>""", """2""", """0""", """0""", """0""", """,""", """▁and""", """▁this""", """▁is""", """▁f""", """al""", """s""", """<0xC3>""", """<0xA9>""", """."""] ,)
# fmt: on
lowerCamelCase__ = tokenizer.convert_tokens_to_ids(_lowerCAmelCase )
self.assertListEqual(
_lowerCAmelCase ,[2_62, 2_72, 15_25, 2_86, 2_71, 2_68, 60, 9_16, 6_33, 6_33, 6_33, 2_59, 2_66, 3_01, 2_87, 3_84, 3_67, 2_63, 1_98, 1_72, 2_60] ,)
lowerCamelCase__ = tokenizer.convert_ids_to_tokens(_lowerCAmelCase )
# fmt: off
self.assertListEqual(
_lowerCAmelCase ,["""▁I""", """▁was""", """▁bor""", """n""", """▁in""", """▁""", """<0x39>""", """2""", """0""", """0""", """0""", """,""", """▁and""", """▁this""", """▁is""", """▁f""", """al""", """s""", """<0xC3>""", """<0xA9>""", """."""] )
# fmt: on
def UpperCamelCase_ ( self ):
lowerCamelCase__ = GPTSwaTokenizer(_lowerCAmelCase )
lowerCamelCase__ = ["""This is a test""", """I was born in 92000, and this is falsé."""]
lowerCamelCase__ = [
[4_65, 2_87, 2_65, 6_31, 8_42],
[2_62, 2_72, 15_25, 2_86, 2_71, 2_68, 60, 9_16, 6_33, 6_33, 6_33, 2_59, 2_66, 3_01, 2_87, 3_84, 3_67, 2_63, 1_98, 1_72, 2_60],
]
# Test that encode_fast returns the same as tokenize + convert_tokens_to_ids
for text, expected_ids in zip(_lowerCAmelCase ,_lowerCAmelCase ):
self.assertListEqual(tokenizer.encode_fast(_lowerCAmelCase ) ,_lowerCAmelCase )
# Test that decode_fast returns the input text
for text, token_ids in zip(_lowerCAmelCase ,_lowerCAmelCase ):
self.assertEqual(tokenizer.decode_fast(_lowerCAmelCase ) ,_lowerCAmelCase )
@slow
def UpperCamelCase_ ( self ):
lowerCamelCase__ = [
"""<|python|>def fibonacci(n)\n if n < 0:\n print('Incorrect input')""",
"""Hey there, how are you doing this fine day?""",
"""This is a text with a trailing spaces followed by a dot .""",
"""Häj sväjs lillebrör! =)""",
"""Det är inget fel på Mr. Cool""",
]
# fmt: off
lowerCamelCase__ = {"""input_ids""": [[6_34_23, 5, 68_11, 1_49_54, 2_82, 8_16, 38_21, 6_34_66, 6_34_25, 6_34_62, 18, 6_39_78, 6_78, 3_01, 13_20, 6_34_23, 6_34_55, 6_34_58, 18, 6_39_82, 42_46, 39_40, 19_01, 4_77_89, 55_47, 1_89_94], [1_96_30, 11_00, 6_34_46, 13_42, 6_33, 5_44, 44_88, 5_93, 51_02, 24_16, 6_34_95, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [16_52, 4_28, 2_68, 19_36, 5_15, 2_68, 5_85_93, 2_24_13, 91_06, 5_46, 2_68, 3_32_13, 6_39_79, 6_98, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [5_51_30, 6_34_50, 9_24, 6_34_49, 22_49, 40_62, 15_58, 3_18, 6_35_04, 2_14_98, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [5_09, 3_77, 28_27, 25_59, 3_32, 65_75, 6_34_43, 2_68_01, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """token_type_ids""": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]}
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=_lowerCAmelCase ,model_name="""AI-Sweden/gpt-sw3-126m""" ,sequences=_lowerCAmelCase ,)
| 9
| 0
|
import heapq
import sys
import numpy as np
A_ : Optional[int] =tuple[int, int]
class lowercase_ :
"""simple docstring"""
def __init__( self ):
"""simple docstring"""
a_ = []
a_ = set()
def lowercase__ ( self ):
"""simple docstring"""
if not self.empty():
return self.elements[0][0]
else:
return float("""inf""" )
def lowercase__ ( self ):
"""simple docstring"""
return len(self.elements ) == 0
def lowercase__ ( self , _UpperCAmelCase , _UpperCAmelCase ):
"""simple docstring"""
if item not in self.set:
heapq.heappush(self.elements , (priority, item) )
self.set.add(_lowercase )
else:
# update
# print("update", item)
a_ = []
((a_) , (a_)) = heapq.heappop(self.elements )
while x != item:
temp.append((pri, x) )
((a_) , (a_)) = heapq.heappop(self.elements )
temp.append((priority, item) )
for pro, xxx in temp:
heapq.heappush(self.elements , (pro, xxx) )
def lowercase__ ( self , _UpperCAmelCase ):
"""simple docstring"""
if item in self.set:
self.set.remove(_lowercase )
a_ = []
((a_) , (a_)) = heapq.heappop(self.elements )
while x != item:
temp.append((pro, x) )
((a_) , (a_)) = heapq.heappop(self.elements )
for prito, yyy in temp:
heapq.heappush(self.elements , (prito, yyy) )
def lowercase__ ( self ):
"""simple docstring"""
return self.elements[0][1]
def lowercase__ ( self ):
"""simple docstring"""
((a_) , (a_)) = heapq.heappop(self.elements )
self.set.remove(_lowercase )
return (priority, item)
def lowerCamelCase_ ( UpperCAmelCase__ , UpperCAmelCase__ ):
"""simple docstring"""
a_ = np.array(SCREAMING_SNAKE_CASE_ )
a_ = np.array(SCREAMING_SNAKE_CASE_ )
return np.linalg.norm(a - b )
def lowerCamelCase_ ( UpperCAmelCase__ , UpperCAmelCase__ ):
"""simple docstring"""
return consistent_heuristic(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) // t
def lowerCamelCase_ ( UpperCAmelCase__ , UpperCAmelCase__ ):
"""simple docstring"""
return abs(p[0] - goal[0] ) + abs(p[1] - goal[1] )
def lowerCamelCase_ ( UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ):
"""simple docstring"""
a_ = g_function[start] + Wa * heuristics[i](SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
return ans
def lowerCamelCase_ ( UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ):
"""simple docstring"""
a_ = np.chararray((n, n) )
for i in range(SCREAMING_SNAKE_CASE_ ):
for j in range(SCREAMING_SNAKE_CASE_ ):
a_ = """*"""
for i in range(SCREAMING_SNAKE_CASE_ ):
for j in range(SCREAMING_SNAKE_CASE_ ):
if (j, (n - 1) - i) in blocks:
a_ = """#"""
a_ = """-"""
a_ = back_pointer[goal]
while x != start:
((a_) , (a_)) = x
# print(x)
a_ = """-"""
a_ = back_pointer[x]
a_ = """-"""
for i in range(SCREAMING_SNAKE_CASE_ ):
for j in range(SCREAMING_SNAKE_CASE_ ):
if (i, j) == (0, n - 1):
print(grid[i][j] , end=""" """ )
print("""<-- End position""" , end=""" """ )
else:
print(grid[i][j] , end=""" """ )
print()
print("""^""" )
print("""Start position""" )
print()
print("""# is an obstacle""" )
print("""- is the path taken by algorithm""" )
print("""PATH TAKEN BY THE ALGORITHM IS:-""" )
a_ = back_pointer[goal]
while x != start:
print(SCREAMING_SNAKE_CASE_ , end=""" """ )
a_ = back_pointer[x]
print(SCREAMING_SNAKE_CASE_ )
sys.exit()
def lowerCamelCase_ ( UpperCAmelCase__ ):
"""simple docstring"""
if p[0] < 0 or p[0] > n - 1:
return False
if p[1] < 0 or p[1] > n - 1:
return False
return True
def lowerCamelCase_ ( UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , ):
"""simple docstring"""
for itera in range(SCREAMING_SNAKE_CASE_ ):
open_list[itera].remove_element(SCREAMING_SNAKE_CASE_ )
# print("s", s)
# print("j", j)
((a_) , (a_)) = s
a_ = (x - 1, y)
a_ = (x + 1, y)
a_ = (x, y + 1)
a_ = (x, y - 1)
for neighbours in [left, right, up, down]:
if neighbours not in blocks:
if valid(SCREAMING_SNAKE_CASE_ ) and neighbours not in visited:
# print("neighbour", neighbours)
visited.add(SCREAMING_SNAKE_CASE_ )
a_ = -1
a_ = float("""inf""" )
if valid(SCREAMING_SNAKE_CASE_ ) and g_function[neighbours] > g_function[s] + 1:
a_ = g_function[s] + 1
a_ = s
if neighbours not in close_list_anchor:
open_list[0].put(SCREAMING_SNAKE_CASE_ , key(SCREAMING_SNAKE_CASE_ , 0 , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) )
if neighbours not in close_list_inad:
for var in range(1 , SCREAMING_SNAKE_CASE_ ):
if key(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) <= Wa * key(
SCREAMING_SNAKE_CASE_ , 0 , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
open_list[j].put(
SCREAMING_SNAKE_CASE_ , key(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) )
def lowerCamelCase_ ( ):
"""simple docstring"""
a_ = []
for x in range(1 , 5 ):
for y in range(1 , 6 ):
some_list.append((x, y) )
for x in range(15 , 20 ):
some_list.append((x, 17) )
for x in range(10 , 19 ):
for y in range(1 , 15 ):
some_list.append((x, y) )
# L block
for x in range(1 , 4 ):
for y in range(12 , 19 ):
some_list.append((x, y) )
for x in range(3 , 13 ):
for y in range(16 , 19 ):
some_list.append((x, y) )
return some_list
A_ : Optional[int] ={0: consistent_heuristic, 1: heuristic_a, 2: heuristic_a}
A_ : str =[
(0, 1),
(1, 1),
(2, 1),
(3, 1),
(4, 1),
(5, 1),
(6, 1),
(7, 1),
(8, 1),
(9, 1),
(10, 1),
(11, 1),
(12, 1),
(13, 1),
(14, 1),
(15, 1),
(16, 1),
(17, 1),
(18, 1),
(19, 1),
]
A_ : List[Any] =make_common_ground()
A_ : Optional[int] =blocks_blk
# hyper parameters
A_ : Dict =1
A_ : Optional[int] =1
A_ : Optional[int] =20
A_ : str =3 # one consistent and two other inconsistent
# start and end destination
A_ : Optional[Any] =(0, 0)
A_ : Optional[int] =(n - 1, n - 1)
A_ : Dict =1
def lowerCamelCase_ ( UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ):
"""simple docstring"""
a_ = {start: 0, goal: float("""inf""" )}
a_ = {start: -1, goal: -1}
a_ = []
a_ = set()
for i in range(SCREAMING_SNAKE_CASE_ ):
open_list.append(PriorityQueue() )
open_list[i].put(SCREAMING_SNAKE_CASE_ , key(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) )
a_ = []
a_ = []
while open_list[0].minkey() < float("""inf""" ):
for i in range(1 , SCREAMING_SNAKE_CASE_ ):
# print(open_list[0].minkey(), open_list[i].minkey())
if open_list[i].minkey() <= Wa * open_list[0].minkey():
global t
t += 1
if g_function[goal] <= open_list[i].minkey():
if g_function[goal] < float("""inf""" ):
do_something(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
else:
a_ , a_ = open_list[i].top_show()
visited.add(SCREAMING_SNAKE_CASE_ )
expand_state(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , )
close_list_inad.append(SCREAMING_SNAKE_CASE_ )
else:
if g_function[goal] <= open_list[0].minkey():
if g_function[goal] < float("""inf""" ):
do_something(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
else:
a_ = open_list[0].top_show()
visited.add(SCREAMING_SNAKE_CASE_ )
expand_state(
SCREAMING_SNAKE_CASE_ , 0 , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , )
close_list_anchor.append(SCREAMING_SNAKE_CASE_ )
print("""No path found to goal""" )
print()
for i in range(n - 1 , -1 , -1 ):
for j in range(SCREAMING_SNAKE_CASE_ ):
if (j, i) in blocks:
print("""#""" , end=""" """ )
elif (j, i) in back_pointer:
if (j, i) == (n - 1, n - 1):
print("""*""" , end=""" """ )
else:
print("""-""" , end=""" """ )
else:
print("""*""" , end=""" """ )
if (j, i) == (n - 1, n - 1):
print("""<-- End position""" , end=""" """ )
print()
print("""^""" )
print("""Start position""" )
print()
print("""# is an obstacle""" )
print("""- is the path taken by algorithm""" )
if __name__ == "__main__":
multi_a_star(start, goal, n_heuristic)
| 483
|
'''simple docstring'''
import argparse
import torch
from transformers import OpenAIGPTConfig, OpenAIGPTModel, load_tf_weights_in_openai_gpt
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def lowerCamelCase( SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ) -> str:
# Construct model
if openai_config_file == "":
A_ = OpenAIGPTConfig()
else:
A_ = OpenAIGPTConfig.from_json_file(SCREAMING_SNAKE_CASE_ )
A_ = OpenAIGPTModel(SCREAMING_SNAKE_CASE_ )
# Load weights from numpy
load_tf_weights_in_openai_gpt(SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ )
# Save pytorch-model
A_ = pytorch_dump_folder_path + '/' + WEIGHTS_NAME
A_ = pytorch_dump_folder_path + '/' + CONFIG_NAME
print(F'Save PyTorch model to {pytorch_weights_dump_path}' )
torch.save(model.state_dict() ,SCREAMING_SNAKE_CASE_ )
print(F'Save configuration file to {pytorch_config_dump_path}' )
with open(SCREAMING_SNAKE_CASE_ ,'w' ,encoding='utf-8' ) as f:
f.write(config.to_json_string() )
if __name__ == "__main__":
_SCREAMING_SNAKE_CASE = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--openai_checkpoint_folder_path",
default=None,
type=str,
required=True,
help="Path to the TensorFlow checkpoint path.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
parser.add_argument(
"--openai_config_file",
default="",
type=str,
help=(
"An optional config json file corresponding to the pre-trained OpenAI model. \n"
"This specifies the model architecture."
),
)
_SCREAMING_SNAKE_CASE = parser.parse_args()
convert_openai_checkpoint_to_pytorch(
args.openai_checkpoint_folder_path, args.openai_config_file, args.pytorch_dump_folder_path
)
| 366
| 0
|
'''simple docstring'''
import sys
from typing import Tuple
import numpy as np
import torch
from PIL import Image
from torch import nn
from transformers.image_utils import PILImageResampling
from utils import img_tensorize
class __magic_name__ :
def __init__( self : str , lowercase_ : int , lowercase_ : Union[str, Any]=sys.maxsize ):
lowercase_ : Optional[int] = '''bilinear'''
lowercase_ : Optional[Any] = max_size
lowercase_ : List[Any] = short_edge_length
def __call__( self : int , lowercase_ : Dict ):
lowercase_ : Any = []
for img in imgs:
lowercase_ : Optional[Any] = img.shape[:2]
# later: provide list and randomly choose index for resize
lowercase_ : str = np.random.randint(self.short_edge_length[0] , self.short_edge_length[1] + 1 )
if size == 0:
return img
lowercase_ : int = size * 1.0 / min(lowercase_ , lowercase_ )
if h < w:
lowercase_ : Dict = size, scale * w
else:
lowercase_ : str = scale * h, size
if max(lowercase_ , lowercase_ ) > self.max_size:
lowercase_ : Any = self.max_size * 1.0 / max(lowercase_ , lowercase_ )
lowercase_ : Tuple = newh * scale
lowercase_ : Any = neww * scale
lowercase_ : List[str] = int(neww + 0.5 )
lowercase_ : List[str] = int(newh + 0.5 )
if img.dtype == np.uinta:
lowercase_ : Optional[int] = Image.fromarray(lowercase_ )
lowercase_ : List[str] = pil_image.resize((neww, newh) , PILImageResampling.BILINEAR )
lowercase_ : Optional[Any] = np.asarray(lowercase_ )
else:
lowercase_ : Any = img.permute(2 , 0 , 1 ).unsqueeze(0 ) # 3, 0, 1) # hw(c) -> nchw
lowercase_ : Union[str, Any] = nn.functional.interpolate(
lowercase_ , (newh, neww) , mode=self.interp_method , align_corners=lowercase_ ).squeeze(0 )
img_augs.append(lowercase_ )
return img_augs
class __magic_name__ :
def __init__( self : Tuple , lowercase_ : Dict ):
lowercase_ : Optional[Any] = ResizeShortestEdge([cfg.INPUT.MIN_SIZE_TEST, cfg.INPUT.MIN_SIZE_TEST] , cfg.INPUT.MAX_SIZE_TEST )
lowercase_ : Any = cfg.INPUT.FORMAT
lowercase_ : List[Any] = cfg.SIZE_DIVISIBILITY
lowercase_ : List[str] = cfg.PAD_VALUE
lowercase_ : Dict = cfg.INPUT.MAX_SIZE_TEST
lowercase_ : Optional[Any] = cfg.MODEL.DEVICE
lowercase_ : int = torch.tensor(cfg.MODEL.PIXEL_STD ).to(self.device ).view(len(cfg.MODEL.PIXEL_STD ) , 1 , 1 )
lowercase_ : Tuple = torch.tensor(cfg.MODEL.PIXEL_MEAN ).to(self.device ).view(len(cfg.MODEL.PIXEL_STD ) , 1 , 1 )
lowercase_ : int = lambda lowercase_ : (x - self.pixel_mean) / self.pixel_std
def SCREAMING_SNAKE_CASE_ ( self : List[Any] , lowercase_ : List[Any] ):
lowercase_ : Optional[Any] = tuple(max(lowercase_ ) for s in zip(*[img.shape for img in images] ) )
lowercase_ : Union[str, Any] = [im.shape[-2:] for im in images]
lowercase_ : Tuple = [
nn.functional.pad(
lowercase_ , [0, max_size[-1] - size[1], 0, max_size[-2] - size[0]] , value=self.pad_value , )
for size, im in zip(lowercase_ , lowercase_ )
]
return torch.stack(lowercase_ ), torch.tensor(lowercase_ )
def __call__( self : List[Any] , lowercase_ : str , lowercase_ : Union[str, Any]=False ):
with torch.no_grad():
if not isinstance(lowercase_ , lowercase_ ):
lowercase_ : Any = [images]
if single_image:
assert len(lowercase_ ) == 1
for i in range(len(lowercase_ ) ):
if isinstance(images[i] , torch.Tensor ):
images.insert(lowercase_ , images.pop(lowercase_ ).to(self.device ).float() )
elif not isinstance(images[i] , torch.Tensor ):
images.insert(
lowercase_ , torch.as_tensor(img_tensorize(images.pop(lowercase_ ) , input_format=self.input_format ) )
.to(self.device )
.float() , )
# resize smallest edge
lowercase_ : Union[str, Any] = torch.tensor([im.shape[:2] for im in images] )
lowercase_ : Dict = self.aug(lowercase_ )
# transpose images and convert to torch tensors
# images = [torch.as_tensor(i.astype("float32")).permute(2, 0, 1).to(self.device) for i in images]
# now normalize before pad to avoid useless arithmetic
lowercase_ : List[str] = [self.normalizer(lowercase_ ) for x in images]
# now pad them to do the following operations
lowercase_ : Dict = self.pad(lowercase_ )
# Normalize
if self.size_divisibility > 0:
raise NotImplementedError()
# pad
lowercase_ : Union[str, Any] = torch.true_divide(lowercase_ , lowercase_ )
if single_image:
return images[0], sizes[0], scales_yx[0]
else:
return images, sizes, scales_yx
def lowerCamelCase ( UpperCAmelCase__ : Tuple , UpperCAmelCase__ : Union[str, Any] ) -> Optional[int]:
boxes[:, 0::2] *= scale_yx[:, 1]
boxes[:, 1::2] *= scale_yx[:, 0]
return boxes
def lowerCamelCase ( UpperCAmelCase__ : Dict , UpperCAmelCase__ : Tuple[int, int] ) -> Union[str, Any]:
assert torch.isfinite(__A ).all(), "Box tensor contains infinite or NaN!"
lowercase_ : Optional[Any] = box_size
tensor[:, 0].clamp_(min=0 , max=__A )
tensor[:, 1].clamp_(min=0 , max=__A )
tensor[:, 2].clamp_(min=0 , max=__A )
tensor[:, 3].clamp_(min=0 , max=__A )
| 708
|
'''simple docstring'''
import pytest
import datasets.config
from datasets.utils.info_utils import is_small_dataset
@pytest.mark.parametrize("""dataset_size""" , [None, 400 * 2**20, 600 * 2**20] )
@pytest.mark.parametrize("""input_in_memory_max_size""" , ["""default""", 0, 100 * 2**20, 900 * 2**20] )
def lowerCamelCase ( UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : str ) -> Any:
if input_in_memory_max_size != "default":
monkeypatch.setattr(datasets.config , """IN_MEMORY_MAX_SIZE""" , UpperCAmelCase__ )
lowercase_ : List[Any] = datasets.config.IN_MEMORY_MAX_SIZE
if input_in_memory_max_size == "default":
assert in_memory_max_size == 0
else:
assert in_memory_max_size == input_in_memory_max_size
if dataset_size and in_memory_max_size:
lowercase_ : str = dataset_size < in_memory_max_size
else:
lowercase_ : List[Any] = False
lowercase_ : Any = is_small_dataset(UpperCAmelCase__ )
assert result == expected
| 30
| 0
|
def lowerCAmelCase_ ( __a , __a ) -> float:
"""simple docstring"""
_validate_point(__a )
_validate_point(__a )
if len(__a ) != len(__a ):
raise ValueError("Both points must be in the same n-dimensional space" )
return float(sum(abs(a - b ) for a, b in zip(__a , __a ) ) )
def lowerCAmelCase_ ( __a ) -> None:
"""simple docstring"""
if point:
if isinstance(__a , __a ):
for item in point:
if not isinstance(__a , (int, float) ):
lowerCamelCase__: Union[str, Any] =(
"Expected a list of numbers as input, found "
F"""{type(__a ).__name__}"""
)
raise TypeError(__a )
else:
lowerCamelCase__: Dict =F"""Expected a list of numbers as input, found {type(__a ).__name__}"""
raise TypeError(__a )
else:
raise ValueError("Missing an input" )
def lowerCAmelCase_ ( __a , __a ) -> float:
"""simple docstring"""
_validate_point(__a )
_validate_point(__a )
if len(__a ) != len(__a ):
raise ValueError("Both points must be in the same n-dimensional space" )
return float(sum(abs(x - y ) for x, y in zip(__a , __a ) ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 59
|
import pyarrow.parquet as pq
import pytest
from datasets import Audio, Dataset, DatasetDict, Features, NamedSplit, Sequence, Value, config
from datasets.features.image import Image
from datasets.io.parquet import ParquetDatasetReader, ParquetDatasetWriter, get_writer_batch_size
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def lowerCAmelCase_ ( __a , __a ) -> List[Any]:
"""simple docstring"""
assert isinstance(__a , __a )
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("keep_in_memory" , [False, True] )
def lowerCAmelCase_ ( __a , __a , __a ) -> Any:
"""simple docstring"""
lowerCamelCase__: Any =tmp_path / "cache"
lowerCamelCase__: Optional[int] ={"col_1": "string", "col_2": "int64", "col_3": "float64"}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
lowerCamelCase__: Tuple =ParquetDatasetReader(__a , cache_dir=__a , keep_in_memory=__a ).read()
_check_parquet_dataset(__a , __a )
@pytest.mark.parametrize(
"features" , [
None,
{"col_1": "string", "col_2": "int64", "col_3": "float64"},
{"col_1": "string", "col_2": "string", "col_3": "string"},
{"col_1": "int32", "col_2": "int32", "col_3": "int32"},
{"col_1": "float32", "col_2": "float32", "col_3": "float32"},
] , )
def lowerCAmelCase_ ( __a , __a , __a ) -> List[str]:
"""simple docstring"""
lowerCamelCase__: int =tmp_path / "cache"
lowerCamelCase__: Tuple ={"col_1": "string", "col_2": "int64", "col_3": "float64"}
lowerCamelCase__: Union[str, Any] =features.copy() if features else default_expected_features
lowerCamelCase__: Optional[int] =(
Features({feature: Value(__a ) for feature, dtype in features.items()} ) if features is not None else None
)
lowerCamelCase__: int =ParquetDatasetReader(__a , features=__a , cache_dir=__a ).read()
_check_parquet_dataset(__a , __a )
@pytest.mark.parametrize("split" , [None, NamedSplit("train" ), "train", "test"] )
def lowerCAmelCase_ ( __a , __a , __a ) -> Any:
"""simple docstring"""
lowerCamelCase__: Any =tmp_path / "cache"
lowerCamelCase__: Optional[Any] ={"col_1": "string", "col_2": "int64", "col_3": "float64"}
lowerCamelCase__: Optional[Any] =ParquetDatasetReader(__a , cache_dir=__a , split=__a ).read()
_check_parquet_dataset(__a , __a )
assert dataset.split == split if split else "train"
@pytest.mark.parametrize("path_type" , [str, list] )
def lowerCAmelCase_ ( __a , __a , __a ) -> int:
"""simple docstring"""
if issubclass(__a , __a ):
lowerCamelCase__: List[Any] =parquet_path
elif issubclass(__a , __a ):
lowerCamelCase__: str =[parquet_path]
lowerCamelCase__: Tuple =tmp_path / "cache"
lowerCamelCase__: Optional[Any] ={"col_1": "string", "col_2": "int64", "col_3": "float64"}
lowerCamelCase__: int =ParquetDatasetReader(__a , cache_dir=__a ).read()
_check_parquet_dataset(__a , __a )
def lowerCAmelCase_ ( __a , __a , __a=("train",) ) -> Dict:
"""simple docstring"""
assert isinstance(__a , __a )
for split in splits:
lowerCamelCase__: Tuple =dataset_dict[split]
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("keep_in_memory" , [False, True] )
def lowerCAmelCase_ ( __a , __a , __a ) -> Any:
"""simple docstring"""
lowerCamelCase__: List[Any] =tmp_path / "cache"
lowerCamelCase__: Optional[Any] ={"col_1": "string", "col_2": "int64", "col_3": "float64"}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
lowerCamelCase__: Tuple =ParquetDatasetReader(
{"train": parquet_path} , cache_dir=__a , keep_in_memory=__a ).read()
_check_parquet_datasetdict(__a , __a )
@pytest.mark.parametrize(
"features" , [
None,
{"col_1": "string", "col_2": "int64", "col_3": "float64"},
{"col_1": "string", "col_2": "string", "col_3": "string"},
{"col_1": "int32", "col_2": "int32", "col_3": "int32"},
{"col_1": "float32", "col_2": "float32", "col_3": "float32"},
] , )
def lowerCAmelCase_ ( __a , __a , __a ) -> Optional[Any]:
"""simple docstring"""
lowerCamelCase__: Tuple =tmp_path / "cache"
lowerCamelCase__: Optional[int] ={"col_1": "string", "col_2": "int64", "col_3": "float64"}
lowerCamelCase__: List[Any] =features.copy() if features else default_expected_features
lowerCamelCase__: int =(
Features({feature: Value(__a ) for feature, dtype in features.items()} ) if features is not None else None
)
lowerCamelCase__: Optional[Any] =ParquetDatasetReader({"train": parquet_path} , features=__a , cache_dir=__a ).read()
_check_parquet_datasetdict(__a , __a )
@pytest.mark.parametrize("split" , [None, NamedSplit("train" ), "train", "test"] )
def lowerCAmelCase_ ( __a , __a , __a ) -> Union[str, Any]:
"""simple docstring"""
if split:
lowerCamelCase__: Any ={split: parquet_path}
else:
lowerCamelCase__: int ="train"
lowerCamelCase__: Any ={"train": parquet_path, "test": parquet_path}
lowerCamelCase__: str =tmp_path / "cache"
lowerCamelCase__: Any ={"col_1": "string", "col_2": "int64", "col_3": "float64"}
lowerCamelCase__: int =ParquetDatasetReader(__a , cache_dir=__a ).read()
_check_parquet_datasetdict(__a , __a , splits=list(path.keys() ) )
assert all(dataset[split].split == split for split in path.keys() )
def lowerCAmelCase_ ( __a , __a ) -> int:
"""simple docstring"""
lowerCamelCase__: List[str] =ParquetDatasetWriter(__a , tmp_path / "foo.parquet" )
assert writer.write() > 0
lowerCamelCase__: List[str] =pq.ParquetFile(tmp_path / "foo.parquet" )
lowerCamelCase__: List[str] =pf.read()
assert dataset.data.table == output_table
def lowerCAmelCase_ ( __a , __a ) -> List[str]:
"""simple docstring"""
lowerCamelCase__: List[str] =str(shared_datadir / "test_image_rgb.jpg" )
lowerCamelCase__: Union[str, Any] ={"image": [image_path]}
lowerCamelCase__: Optional[Any] =Features({"image": Image()} )
lowerCamelCase__: Optional[int] =Dataset.from_dict(__a , features=__a )
lowerCamelCase__: Optional[int] =ParquetDatasetWriter(__a , tmp_path / "foo.parquet" )
assert writer.write() > 0
lowerCamelCase__: Dict =Dataset.from_parquet(str(tmp_path / "foo.parquet" ) )
assert dataset.features == reloaded_dataset.features
lowerCamelCase__: Optional[Any] =ParquetDatasetReader(str(tmp_path / "foo.parquet" ) , streaming=__a ).read()
assert dataset.features == reloaded_iterable_dataset.features
@pytest.mark.parametrize(
"feature, expected" , [
(Features({"foo": Value("int32" )} ), None),
(Features({"image": Image(), "foo": Value("int32" )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS),
(Features({"nested": Sequence(Audio() )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS),
] , )
def lowerCAmelCase_ ( __a , __a ) -> Optional[Any]:
"""simple docstring"""
assert get_writer_batch_size(__a ) == expected
| 59
| 1
|
'''simple docstring'''
import qiskit
def lowercase__ ( __UpperCamelCase = 2 )-> qiskit.result.counts.Counts:
UpperCamelCase = qubits
# Using Aer's simulator
UpperCamelCase = qiskit.Aer.get_backend("""aer_simulator""" )
# Creating a Quantum Circuit acting on the q register
UpperCamelCase = qiskit.QuantumCircuit(__lowerCAmelCase , __lowerCAmelCase )
# Adding a H gate on qubit 0 (now q0 in superposition)
circuit.h(0 )
for i in range(1 , __lowerCAmelCase ):
# Adding CX (CNOT) gate
circuit.cx(i - 1 , __lowerCAmelCase )
# Mapping the quantum measurement to the classical bits
circuit.measure(list(range(__lowerCAmelCase ) ) , list(range(__lowerCAmelCase ) ) )
# Now measuring any one qubit would affect other qubits to collapse
# their super position and have same state as the measured one.
# Executing the circuit on the simulator
UpperCamelCase = qiskit.execute(__lowerCAmelCase , __lowerCAmelCase , shots=1000 )
return job.result().get_counts(__lowerCAmelCase )
if __name__ == "__main__":
print(f'Total count for various states are: {quantum_entanglement(3)}')
| 711
|
'''simple docstring'''
import collections
import inspect
import unittest
from typing import Dict, List, Tuple
from transformers import MaskFormerSwinConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, torch_device
from transformers.utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MaskFormerSwinBackbone
from transformers.models.maskformer import MaskFormerSwinModel
class a_ :
def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=13 , _SCREAMING_SNAKE_CASE=32 , _SCREAMING_SNAKE_CASE=2 , _SCREAMING_SNAKE_CASE=3 , _SCREAMING_SNAKE_CASE=16 , _SCREAMING_SNAKE_CASE=[1, 2, 1] , _SCREAMING_SNAKE_CASE=[2, 2, 4] , _SCREAMING_SNAKE_CASE=2 , _SCREAMING_SNAKE_CASE=2.0 , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=0.0 , _SCREAMING_SNAKE_CASE=0.0 , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE="gelu" , _SCREAMING_SNAKE_CASE=False , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=0.0_2 , _SCREAMING_SNAKE_CASE=1e-5 , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=10 , _SCREAMING_SNAKE_CASE=8 , _SCREAMING_SNAKE_CASE=["stage1", "stage2", "stage3"] , _SCREAMING_SNAKE_CASE=[1, 2, 3] , ) -> Any:
"""simple docstring"""
UpperCamelCase = parent
UpperCamelCase = batch_size
UpperCamelCase = image_size
UpperCamelCase = patch_size
UpperCamelCase = num_channels
UpperCamelCase = embed_dim
UpperCamelCase = depths
UpperCamelCase = num_heads
UpperCamelCase = window_size
UpperCamelCase = mlp_ratio
UpperCamelCase = qkv_bias
UpperCamelCase = hidden_dropout_prob
UpperCamelCase = attention_probs_dropout_prob
UpperCamelCase = drop_path_rate
UpperCamelCase = hidden_act
UpperCamelCase = use_absolute_embeddings
UpperCamelCase = patch_norm
UpperCamelCase = layer_norm_eps
UpperCamelCase = initializer_range
UpperCamelCase = is_training
UpperCamelCase = scope
UpperCamelCase = use_labels
UpperCamelCase = type_sequence_label_size
UpperCamelCase = encoder_stride
UpperCamelCase = out_features
UpperCamelCase = out_indices
def A__ ( self ) -> Any:
"""simple docstring"""
UpperCamelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCamelCase = None
if self.use_labels:
UpperCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCamelCase = self.get_config()
return config, pixel_values, labels
def A__ ( self ) -> str:
"""simple docstring"""
return MaskFormerSwinConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , out_features=self.out_features , out_indices=self.out_indices , )
def A__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Union[str, Any]:
"""simple docstring"""
UpperCamelCase = MaskFormerSwinModel(config=_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
UpperCamelCase = model(_SCREAMING_SNAKE_CASE )
UpperCamelCase = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
UpperCamelCase = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) )
def A__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Dict:
"""simple docstring"""
UpperCamelCase = MaskFormerSwinBackbone(config=_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
UpperCamelCase = model(_SCREAMING_SNAKE_CASE )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [13, 16, 16, 16] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , [16, 32, 64] )
# verify ValueError
with self.parent.assertRaises(_SCREAMING_SNAKE_CASE ):
UpperCamelCase = ["""stem"""]
UpperCamelCase = MaskFormerSwinBackbone(config=_SCREAMING_SNAKE_CASE )
def A__ ( self ) -> str:
"""simple docstring"""
UpperCamelCase = self.prepare_config_and_inputs()
UpperCamelCase ,UpperCamelCase ,UpperCamelCase = config_and_inputs
UpperCamelCase = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class a_ ( lowerCamelCase , lowerCamelCase , unittest.TestCase ):
lowercase = (
(
MaskFormerSwinModel,
MaskFormerSwinBackbone,
)
if is_torch_available()
else ()
)
lowercase = {"""feature-extraction""": MaskFormerSwinModel} if is_torch_available() else {}
lowercase = False
lowercase = False
lowercase = False
lowercase = False
lowercase = False
def A__ ( self ) -> int:
"""simple docstring"""
UpperCamelCase = MaskFormerSwinModelTester(self )
UpperCamelCase = ConfigTester(self , config_class=_SCREAMING_SNAKE_CASE , embed_dim=37 )
@require_torch_multi_gpu
@unittest.skip(
reason=(
"""`MaskFormerSwinModel` outputs `hidden_states_spatial_dimensions` which doesn't work well with"""
""" `nn.DataParallel`"""
) )
def A__ ( self ) -> List[str]:
"""simple docstring"""
pass
def A__ ( self ) -> Dict:
"""simple docstring"""
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def A__ ( self ) -> int:
"""simple docstring"""
return
def A__ ( self ) -> List[str]:
"""simple docstring"""
UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_SCREAMING_SNAKE_CASE )
def A__ ( self ) -> List[Any]:
"""simple docstring"""
UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*_SCREAMING_SNAKE_CASE )
@unittest.skip("""Swin does not use inputs_embeds""" )
def A__ ( self ) -> Optional[Any]:
"""simple docstring"""
pass
@unittest.skip("""Swin does not support feedforward chunking""" )
def A__ ( self ) -> Dict:
"""simple docstring"""
pass
def A__ ( self ) -> Optional[int]:
"""simple docstring"""
UpperCamelCase ,UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase = model_class(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
UpperCamelCase = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_SCREAMING_SNAKE_CASE , nn.Linear ) )
def A__ ( self ) -> int:
"""simple docstring"""
UpperCamelCase ,UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase = model_class(_SCREAMING_SNAKE_CASE )
UpperCamelCase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCamelCase = [*signature.parameters.keys()]
UpperCamelCase = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , _SCREAMING_SNAKE_CASE )
@unittest.skip(reason="""MaskFormerSwin is only used as backbone and doesn't support output_attentions""" )
def A__ ( self ) -> Optional[int]:
"""simple docstring"""
pass
@unittest.skip(reason="""MaskFormerSwin is only used as an internal backbone""" )
def A__ ( self ) -> List[str]:
"""simple docstring"""
pass
def A__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Optional[Any]:
"""simple docstring"""
UpperCamelCase = model_class(_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
with torch.no_grad():
UpperCamelCase = model(**self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) )
UpperCamelCase = outputs.hidden_states
UpperCamelCase = getattr(
self.model_tester , """expected_num_hidden_layers""" , len(self.model_tester.depths ) + 1 )
self.assertEqual(len(_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE )
# Swin has a different seq_length
UpperCamelCase = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
UpperCamelCase = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
def A__ ( self ) -> str:
"""simple docstring"""
UpperCamelCase ,UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
UpperCamelCase = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes:
UpperCamelCase = True
self.check_hidden_states_output(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
UpperCamelCase = True
self.check_hidden_states_output(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
def A__ ( self ) -> Union[str, Any]:
"""simple docstring"""
UpperCamelCase ,UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
UpperCamelCase = 3
UpperCamelCase = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
UpperCamelCase = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
UpperCamelCase = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
UpperCamelCase = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
for model_class in self.all_model_classes:
UpperCamelCase = True
self.check_hidden_states_output(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , (padded_height, padded_width) )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
UpperCamelCase = True
self.check_hidden_states_output(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , (padded_height, padded_width) )
@unittest.skip(reason="""MaskFormerSwin doesn't have pretrained checkpoints""" )
def A__ ( self ) -> Union[str, Any]:
"""simple docstring"""
pass
@unittest.skip(reason="""This will be fixed once MaskFormerSwin is replaced by native Swin""" )
def A__ ( self ) -> str:
"""simple docstring"""
pass
@unittest.skip(reason="""This will be fixed once MaskFormerSwin is replaced by native Swin""" )
def A__ ( self ) -> List[Any]:
"""simple docstring"""
pass
def A__ ( self ) -> Any:
"""simple docstring"""
UpperCamelCase ,UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
def set_nan_tensor_to_zero(_SCREAMING_SNAKE_CASE ):
UpperCamelCase = 0
return t
def check_equivalence(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE={} ):
with torch.no_grad():
UpperCamelCase = model(**_SCREAMING_SNAKE_CASE , return_dict=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
UpperCamelCase = model(**_SCREAMING_SNAKE_CASE , return_dict=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ).to_tuple()
def recursive_check(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
if isinstance(_SCREAMING_SNAKE_CASE , (List, Tuple) ):
for tuple_iterable_value, dict_iterable_value in zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
recursive_check(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
elif isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
for tuple_iterable_value, dict_iterable_value in zip(
tuple_object.values() , dict_object.values() ):
recursive_check(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
elif tuple_object is None:
return
else:
self.assertTrue(
torch.allclose(
set_nan_tensor_to_zero(_SCREAMING_SNAKE_CASE ) , set_nan_tensor_to_zero(_SCREAMING_SNAKE_CASE ) , atol=1e-5 ) , msg=(
"""Tuple and dict output are not equal. Difference:"""
F" {torch.max(torch.abs(tuple_object - dict_object ) )}. Tuple has `nan`:"
F" {torch.isnan(_SCREAMING_SNAKE_CASE ).any()} and `inf`: {torch.isinf(_SCREAMING_SNAKE_CASE )}. Dict has"
F" `nan`: {torch.isnan(_SCREAMING_SNAKE_CASE ).any()} and `inf`: {torch.isinf(_SCREAMING_SNAKE_CASE )}."
) , )
recursive_check(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
for model_class in self.all_model_classes:
UpperCamelCase = model_class(_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
UpperCamelCase = self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
UpperCamelCase = self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
check_equivalence(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
UpperCamelCase = self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , return_labels=_SCREAMING_SNAKE_CASE )
UpperCamelCase = self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , return_labels=_SCREAMING_SNAKE_CASE )
check_equivalence(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
UpperCamelCase = self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
UpperCamelCase = self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
check_equivalence(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , {"""output_hidden_states""": True} )
UpperCamelCase = self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , return_labels=_SCREAMING_SNAKE_CASE )
UpperCamelCase = self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , return_labels=_SCREAMING_SNAKE_CASE )
check_equivalence(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , {"""output_hidden_states""": True} )
@require_torch
class a_ ( unittest.TestCase , lowerCamelCase ):
lowercase = (MaskFormerSwinBackbone,) if is_torch_available() else ()
lowercase = MaskFormerSwinConfig
def A__ ( self ) -> List[str]:
"""simple docstring"""
UpperCamelCase = MaskFormerSwinModelTester(self )
def A__ ( self ) -> str:
"""simple docstring"""
UpperCamelCase ,UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
UpperCamelCase = inputs_dict["""pixel_values"""].shape[0]
for backbone_class in self.all_model_classes:
UpperCamelCase = backbone_class(_SCREAMING_SNAKE_CASE )
backbone.to(_SCREAMING_SNAKE_CASE )
backbone.eval()
UpperCamelCase = backbone(**_SCREAMING_SNAKE_CASE )
# Test default outputs and verify feature maps
self.assertIsInstance(outputs.feature_maps , _SCREAMING_SNAKE_CASE )
self.assertTrue(len(outputs.feature_maps ) == len(backbone.channels ) )
for feature_map, n_channels in zip(outputs.feature_maps , backbone.channels ):
self.assertTrue(feature_map.shape[:2] , (batch_size, n_channels) )
self.assertIsNone(outputs.hidden_states )
self.assertIsNone(outputs.attentions )
# Test output_hidden_states=True
UpperCamelCase = backbone(**_SCREAMING_SNAKE_CASE , output_hidden_states=_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(outputs.hidden_states )
self.assertTrue(len(outputs.hidden_states ) , len(backbone.stage_names ) )
# We skip the stem layer
for hidden_states, n_channels in zip(outputs.hidden_states[1:] , backbone.channels ):
for hidden_state in hidden_states:
# Hidden states are in the format (batch_size, (height * width), n_channels)
UpperCamelCase ,UpperCamelCase ,UpperCamelCase = hidden_state.shape
self.assertTrue((h_batch_size, h_n_channels) , (batch_size, n_channels) )
# Test output_attentions=True
if self.has_attentions:
UpperCamelCase = backbone(**_SCREAMING_SNAKE_CASE , output_attentions=_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(outputs.attentions )
| 35
| 0
|
def SCREAMING_SNAKE_CASE ( lowercase_=28_123 ) -> Union[str, Any]:
"""simple docstring"""
A__ = [1] * (limit + 1)
for i in range(2 , int(limit**0.5 ) + 1 ):
sum_divs[i * i] += i
for k in range(i + 1 , limit // i + 1 ):
sum_divs[k * i] += k + i
A__ = set()
A__ = 0
for n in range(1 , limit + 1 ):
if sum_divs[n] > n:
abundants.add(lowercase_ )
if not any((n - a in abundants) for a in abundants ):
res += n
return res
if __name__ == "__main__":
print(solution())
| 87
|
"""simple docstring"""
from collections import OrderedDict
from typing import List, Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = {
'google/efficientnet-b7': 'https://huggingface.co/google/efficientnet-b7/resolve/main/config.json',
}
class __A ( A_ ):
'''simple docstring'''
lowerCAmelCase : Dict = "efficientnet"
def __init__( self : Optional[Any] ,_snake_case : int = 3 ,_snake_case : int = 600 ,_snake_case : float = 2.0 ,_snake_case : float = 3.1 ,_snake_case : int = 8 ,_snake_case : List[int] = [3, 3, 5, 3, 5, 5, 3] ,_snake_case : List[int] = [32, 16, 24, 40, 80, 112, 192] ,_snake_case : List[int] = [16, 24, 40, 80, 112, 192, 320] ,_snake_case : List[int] = [] ,_snake_case : List[int] = [1, 2, 2, 2, 1, 2, 1] ,_snake_case : List[int] = [1, 2, 2, 3, 3, 4, 1] ,_snake_case : List[int] = [1, 6, 6, 6, 6, 6, 6] ,_snake_case : float = 0.25 ,_snake_case : str = "swish" ,_snake_case : int = 2_560 ,_snake_case : str = "mean" ,_snake_case : float = 0.02 ,_snake_case : float = 0.001 ,_snake_case : float = 0.99 ,_snake_case : float = 0.5 ,_snake_case : float = 0.2 ,**_snake_case : List[str] ,) -> Optional[Any]:
"""simple docstring"""
super().__init__(**_snake_case )
lowercase__ : Optional[Any] = num_channels
lowercase__ : Any = image_size
lowercase__ : Optional[Any] = width_coefficient
lowercase__ : Dict = depth_coefficient
lowercase__ : Optional[int] = depth_divisor
lowercase__ : Optional[int] = kernel_sizes
lowercase__ : str = in_channels
lowercase__ : Any = out_channels
lowercase__ : Union[str, Any] = depthwise_padding
lowercase__ : str = strides
lowercase__ : List[str] = num_block_repeats
lowercase__ : List[str] = expand_ratios
lowercase__ : List[str] = squeeze_expansion_ratio
lowercase__ : Optional[Any] = hidden_act
lowercase__ : Any = hidden_dim
lowercase__ : Optional[int] = pooling_type
lowercase__ : List[str] = initializer_range
lowercase__ : List[Any] = batch_norm_eps
lowercase__ : List[Any] = batch_norm_momentum
lowercase__ : Tuple = dropout_rate
lowercase__ : Tuple = drop_connect_rate
lowercase__ : Union[str, Any] = sum(_snake_case ) * 4
class __A ( A_ ):
'''simple docstring'''
lowerCAmelCase : List[str] = version.parse("1.11" )
@property
def UpperCAmelCase ( self : str ) -> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
return OrderedDict(
[
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
] )
@property
def UpperCAmelCase ( self : Optional[int] ) -> float:
"""simple docstring"""
return 1e-5
| 560
| 0
|
import unittest
import torch
from diffusers import DDIMScheduler, DDPMScheduler, UNetaDModel
from diffusers.training_utils import set_seed
from diffusers.utils.testing_utils import slow
a_ : Dict = False
class UpperCamelCase ( unittest.TestCase ):
def UpperCamelCase ( self : int , snake_case__ : List[Any]=3_2 ):
"""simple docstring"""
set_seed(0 )
SCREAMING_SNAKE_CASE = UNetaDModel(sample_size=snake_case__ , in_channels=3 , out_channels=3 )
SCREAMING_SNAKE_CASE = torch.optim.SGD(model.parameters() , lr=0.0_001 )
return model, optimizer
@slow
def UpperCamelCase ( self : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = 'cpu' # ensure full determinism without setting the CUBLAS_WORKSPACE_CONFIG env variable
SCREAMING_SNAKE_CASE = DDPMScheduler(
num_train_timesteps=1_0_0_0 , beta_start=0.0_001 , beta_end=0.02 , beta_schedule='linear' , clip_sample=snake_case__ , )
SCREAMING_SNAKE_CASE = DDIMScheduler(
num_train_timesteps=1_0_0_0 , beta_start=0.0_001 , beta_end=0.02 , beta_schedule='linear' , clip_sample=snake_case__ , )
assert ddpm_scheduler.config.num_train_timesteps == ddim_scheduler.config.num_train_timesteps
# shared batches for DDPM and DDIM
set_seed(0 )
SCREAMING_SNAKE_CASE = [torch.randn((4, 3, 3_2, 3_2) ).clip(-1 , 1 ).to(snake_case__ ) for _ in range(4 )]
SCREAMING_SNAKE_CASE = [torch.randn((4, 3, 3_2, 3_2) ).to(snake_case__ ) for _ in range(4 )]
SCREAMING_SNAKE_CASE = [torch.randint(0 , 1_0_0_0 , (4,) ).long().to(snake_case__ ) for _ in range(4 )]
# train with a DDPM scheduler
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.get_model_optimizer(resolution=3_2 )
model.train().to(snake_case__ )
for i in range(4 ):
optimizer.zero_grad()
SCREAMING_SNAKE_CASE = ddpm_scheduler.add_noise(clean_images[i] , noise[i] , timesteps[i] )
SCREAMING_SNAKE_CASE = model(snake_case__ , timesteps[i] ).sample
SCREAMING_SNAKE_CASE = torch.nn.functional.mse_loss(snake_case__ , noise[i] )
loss.backward()
optimizer.step()
del model, optimizer
# recreate the model and optimizer, and retry with DDIM
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.get_model_optimizer(resolution=3_2 )
model.train().to(snake_case__ )
for i in range(4 ):
optimizer.zero_grad()
SCREAMING_SNAKE_CASE = ddim_scheduler.add_noise(clean_images[i] , noise[i] , timesteps[i] )
SCREAMING_SNAKE_CASE = model(snake_case__ , timesteps[i] ).sample
SCREAMING_SNAKE_CASE = torch.nn.functional.mse_loss(snake_case__ , noise[i] )
loss.backward()
optimizer.step()
del model, optimizer
self.assertTrue(torch.allclose(snake_case__ , snake_case__ , atol=1E-5 ) )
self.assertTrue(torch.allclose(snake_case__ , snake_case__ , atol=1E-5 ) )
| 673
|
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_herbert import HerbertTokenizer
a_ : Optional[Any] = logging.get_logger(__name__)
a_ : Optional[Any] = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
a_ : Any = {
"vocab_file": {
"allegro/herbert-base-cased": "https://huggingface.co/allegro/herbert-base-cased/resolve/main/vocab.json"
},
"merges_file": {
"allegro/herbert-base-cased": "https://huggingface.co/allegro/herbert-base-cased/resolve/main/merges.txt"
},
}
a_ : Union[str, Any] = {"allegro/herbert-base-cased": 514}
a_ : List[Any] = {}
class UpperCamelCase ( SCREAMING_SNAKE_CASE ):
__UpperCamelCase =VOCAB_FILES_NAMES
__UpperCamelCase =PRETRAINED_VOCAB_FILES_MAP
__UpperCamelCase =PRETRAINED_INIT_CONFIGURATION
__UpperCamelCase =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__UpperCamelCase =HerbertTokenizer
def __init__( self : Tuple , snake_case__ : Optional[Any]=None , snake_case__ : int=None , snake_case__ : Optional[int]=None , snake_case__ : str="<s>" , snake_case__ : Tuple="<unk>" , snake_case__ : List[str]="<pad>" , snake_case__ : Tuple="<mask>" , snake_case__ : Dict="</s>" , **snake_case__ : List[str] , ):
"""simple docstring"""
super().__init__(
snake_case__ , snake_case__ , tokenizer_file=snake_case__ , cls_token=snake_case__ , unk_token=snake_case__ , pad_token=snake_case__ , mask_token=snake_case__ , sep_token=snake_case__ , **snake_case__ , )
def UpperCamelCase ( self : Union[str, Any] , snake_case__ : List[int] , snake_case__ : Optional[List[int]] = None ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = [self.cls_token_id]
SCREAMING_SNAKE_CASE = [self.sep_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def UpperCamelCase ( self : Optional[Any] , snake_case__ : List[int] , snake_case__ : Optional[List[int]] = None , snake_case__ : bool = False ):
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=snake_case__ , token_ids_a=snake_case__ , already_has_special_tokens=snake_case__ )
if token_ids_a is None:
return [1] + ([0] * len(snake_case__ )) + [1]
return [1] + ([0] * len(snake_case__ )) + [1] + ([0] * len(snake_case__ )) + [1]
def UpperCamelCase ( self : Optional[Any] , snake_case__ : List[int] , snake_case__ : Optional[List[int]] = None ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = [self.sep_token_id]
SCREAMING_SNAKE_CASE = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def UpperCamelCase ( self : Tuple , snake_case__ : str , snake_case__ : Optional[str] = None ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self._tokenizer.model.save(snake_case__ , name=snake_case__ )
return tuple(snake_case__ )
| 673
| 1
|
import json
import os
import unittest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class _a ( _lowerCAmelCase , unittest.TestCase ):
A = CLIPTokenizer
A = CLIPTokenizerFast
A = True
A = {}
A = False
def __snake_case (self ) -> Dict:
super().setUp()
# fmt: off
UpperCAmelCase_: Tuple = ["""l""", """o""", """w""", """e""", """r""", """s""", """t""", """i""", """d""", """n""", """lo""", """l</w>""", """w</w>""", """r</w>""", """t</w>""", """low</w>""", """er</w>""", """lowest</w>""", """newer</w>""", """wider""", """<unk>""", """<|startoftext|>""", """<|endoftext|>"""]
# fmt: on
UpperCAmelCase_: List[Any] = dict(zip(SCREAMING_SNAKE_CASE_, range(len(SCREAMING_SNAKE_CASE_ ) ) ) )
UpperCAmelCase_: Optional[Any] = ["""#version: 0.2""", """l o""", """lo w</w>""", """e r</w>"""]
UpperCAmelCase_: List[Any] = {"""unk_token""": """<unk>"""}
UpperCAmelCase_: Any = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["""vocab_file"""] )
UpperCAmelCase_: Union[str, Any] = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["""merges_file"""] )
with open(self.vocab_file, """w""", encoding="""utf-8""" ) as fp:
fp.write(json.dumps(SCREAMING_SNAKE_CASE_ ) + """\n""" )
with open(self.merges_file, """w""", encoding="""utf-8""" ) as fp:
fp.write("""\n""".join(SCREAMING_SNAKE_CASE_ ) )
def __snake_case (self, **SCREAMING_SNAKE_CASE_ ) -> List[str]:
kwargs.update(self.special_tokens_map )
return CLIPTokenizer.from_pretrained(self.tmpdirname, **SCREAMING_SNAKE_CASE_ )
def __snake_case (self, **SCREAMING_SNAKE_CASE_ ) -> Tuple:
kwargs.update(self.special_tokens_map )
return CLIPTokenizerFast.from_pretrained(self.tmpdirname, **SCREAMING_SNAKE_CASE_ )
def __snake_case (self, SCREAMING_SNAKE_CASE_ ) -> List[str]:
UpperCAmelCase_: Optional[Any] = """lower newer"""
UpperCAmelCase_: Any = """lower newer"""
return input_text, output_text
def __snake_case (self ) -> Optional[Any]:
UpperCAmelCase_: List[str] = CLIPTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map )
UpperCAmelCase_: Optional[int] = """lower newer"""
UpperCAmelCase_: str = ["""lo""", """w""", """er</w>""", """n""", """e""", """w""", """er</w>"""]
UpperCAmelCase_: int = tokenizer.tokenize(SCREAMING_SNAKE_CASE_ )
self.assertListEqual(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ )
UpperCAmelCase_: List[Any] = tokens + [tokenizer.unk_token]
UpperCAmelCase_: List[str] = [10, 2, 16, 9, 3, 2, 16, 20]
self.assertListEqual(tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE_ ), SCREAMING_SNAKE_CASE_ )
@require_ftfy
def __snake_case (self ) -> List[Any]:
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})' ):
UpperCAmelCase_: str = self.tokenizer_class.from_pretrained(SCREAMING_SNAKE_CASE_, **SCREAMING_SNAKE_CASE_ )
UpperCAmelCase_: Optional[int] = self.rust_tokenizer_class.from_pretrained(SCREAMING_SNAKE_CASE_, **SCREAMING_SNAKE_CASE_ )
UpperCAmelCase_: Optional[Any] = """A\n'll 11p223RF☆ho!!to?'d'd''d of a cat to-$''d."""
UpperCAmelCase_: int = tokenizer_s.tokenize(SCREAMING_SNAKE_CASE_ )
UpperCAmelCase_: int = tokenizer_r.tokenize(SCREAMING_SNAKE_CASE_ )
self.assertListEqual(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ )
# Test that the tokenization is identical on an example containing a character (Latin Small Letter A
# with Tilde) encoded in 2 different ways
UpperCAmelCase_: int = """xa\u0303y""" + """ """ + """x\xe3y"""
UpperCAmelCase_: Optional[Any] = tokenizer_s.tokenize(SCREAMING_SNAKE_CASE_ )
UpperCAmelCase_: Any = tokenizer_r.tokenize(SCREAMING_SNAKE_CASE_ )
self.assertListEqual(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ )
# Test that the tokenization is identical on unicode of space type
UpperCAmelCase_: Optional[int] = [
"""\u0009""", # (horizontal tab, '\t')
"""\u000B""", # (vertical tab)
"""\u000C""", # (form feed)
"""\u0020""", # (space, ' ')
"""\u200E""", # (left-to-right mark):w
"""\u200F""", # (right-to-left mark)
]
for unicode_seq in spaces_unicodes:
UpperCAmelCase_: str = tokenizer_s.tokenize(SCREAMING_SNAKE_CASE_ )
UpperCAmelCase_: Optional[Any] = tokenizer_r.tokenize(SCREAMING_SNAKE_CASE_ )
self.assertListEqual(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ )
# Test that the tokenization is identical on unicode of line break type
UpperCAmelCase_: List[Any] = [
"""\u000A""", # (line feed, '\n')
"""\r\n""", # (carriage return and line feed, '\r\n')
"""\u000D""", # (carriage return, '\r')
"""\r""", # (carriage return, '\r')
"""\u000D""", # (carriage return, '\r')
"""\u2028""", # (line separator)
"""\u2029""", # (paragraph separator)
# "\u0085", # (next line)
]
# The tokenization is not identical for the character "\u0085" (next line). The slow version using ftfy transforms
# it into the Horizontal Ellipsis character "…" ("\u2026") while the fast version transforms it into a
# space (and thus into an empty list).
for unicode_seq in line_break_unicodes:
UpperCAmelCase_: List[Any] = tokenizer_s.tokenize(SCREAMING_SNAKE_CASE_ )
UpperCAmelCase_: List[str] = tokenizer_r.tokenize(SCREAMING_SNAKE_CASE_ )
self.assertListEqual(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ )
def __snake_case (self ) -> Union[str, Any]:
# Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space`
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})' ):
UpperCAmelCase_: Optional[Any] = """hello""" # `hello` is a token in the vocabulary of `pretrained_name`
UpperCAmelCase_: List[str] = f'{text_of_1_token} {text_of_1_token}'
UpperCAmelCase_: str = self.rust_tokenizer_class.from_pretrained(
SCREAMING_SNAKE_CASE_, use_fast=SCREAMING_SNAKE_CASE_, )
UpperCAmelCase_: Tuple = tokenizer_r(SCREAMING_SNAKE_CASE_, return_offsets_mapping=SCREAMING_SNAKE_CASE_, add_special_tokens=SCREAMING_SNAKE_CASE_ )
self.assertEqual(encoding.offset_mapping[0], (0, len(SCREAMING_SNAKE_CASE_ )) )
self.assertEqual(
encoding.offset_mapping[1], (len(SCREAMING_SNAKE_CASE_ ) + 1, len(SCREAMING_SNAKE_CASE_ ) + 1 + len(SCREAMING_SNAKE_CASE_ )), )
UpperCAmelCase_: List[str] = f' {text}'
UpperCAmelCase_: Tuple = self.rust_tokenizer_class.from_pretrained(
SCREAMING_SNAKE_CASE_, use_fast=SCREAMING_SNAKE_CASE_, )
UpperCAmelCase_: str = tokenizer_r(SCREAMING_SNAKE_CASE_, return_offsets_mapping=SCREAMING_SNAKE_CASE_, add_special_tokens=SCREAMING_SNAKE_CASE_ )
self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(SCREAMING_SNAKE_CASE_ )) )
self.assertEqual(
encoding.offset_mapping[1], (1 + len(SCREAMING_SNAKE_CASE_ ) + 1, 1 + len(SCREAMING_SNAKE_CASE_ ) + 1 + len(SCREAMING_SNAKE_CASE_ )), )
def __snake_case (self ) -> Optional[Any]:
# Test related to the breaking change introduced in transformers v4.17.0
# We need to check that an error in raised when the user try to load a previous version of the tokenizer.
with self.assertRaises(SCREAMING_SNAKE_CASE_ ) as context:
self.rust_tokenizer_class.from_pretrained("""robot-test/old-clip-tokenizer""" )
self.assertTrue(
context.exception.args[0].startswith(
"""The `backend_tokenizer` provided does not match the expected format.""" ) )
@require_ftfy
def __snake_case (self ) -> Dict:
super().test_tokenization_python_rust_equals()
def __snake_case (self ) -> int:
# CLIP always lower cases letters
pass
| 556
|
from __future__ import annotations
def lowerCAmelCase_ (lowerCAmelCase__: int | str ):
"""simple docstring"""
UpperCAmelCase_: Optional[int] = str(lowerCAmelCase__ )
return n == n[::-1]
def lowerCAmelCase_ (lowerCAmelCase__: int = 1_0_0_0_0_0_0 ):
"""simple docstring"""
UpperCAmelCase_: int = 0
for i in range(1 , lowerCAmelCase__ ):
if is_palindrome(lowerCAmelCase__ ) and is_palindrome(bin(lowerCAmelCase__ ).split("""b""" )[1] ):
total += i
return total
if __name__ == "__main__":
print(solution(int(str(input().strip()))))
| 556
| 1
|
import re
from typing import Callable, List, Optional, Union
import tensorflow as tf
try:
from tensorflow.keras.optimizers.legacy import Adam
except ImportError:
from tensorflow.keras.optimizers import Adam
class __lowerCamelCase ( tf.keras.optimizers.schedules.LearningRateSchedule ):
def __init__( self , __snake_case , __snake_case , __snake_case , __snake_case = 1.0 , __snake_case = None , ) -> Union[str, Any]:
"""simple docstring"""
super().__init__()
UpperCAmelCase: List[Any] = initial_learning_rate
UpperCAmelCase: Tuple = warmup_steps
UpperCAmelCase: List[str] = power
UpperCAmelCase: List[Any] = decay_schedule_fn
UpperCAmelCase: Any = name
def __call__( self , __snake_case ) -> Union[str, Any]:
"""simple docstring"""
with tf.name_scope(self.name or "WarmUp" ) as name:
# Implements polynomial warmup. i.e., if global_step < warmup_steps, the
# learning rate will be `global_step/num_warmup_steps * init_lr`.
UpperCAmelCase: int = tf.cast(__snake_case , tf.floataa )
UpperCAmelCase: List[str] = tf.cast(self.warmup_steps , tf.floataa )
UpperCAmelCase: List[str] = global_step_float / warmup_steps_float
UpperCAmelCase: List[str] = self.initial_learning_rate * tf.math.pow(__snake_case , self.power )
return tf.cond(
global_step_float < warmup_steps_float , lambda: warmup_learning_rate , lambda: self.decay_schedule_fn(step - self.warmup_steps ) , name=__snake_case , )
def A__ ( self ) -> Optional[int]:
"""simple docstring"""
return {
"initial_learning_rate": self.initial_learning_rate,
"decay_schedule_fn": self.decay_schedule_fn,
"warmup_steps": self.warmup_steps,
"power": self.power,
"name": self.name,
}
def __UpperCAmelCase ( snake_case_ : float , snake_case_ : int , snake_case_ : int , snake_case_ : float = 0.0 , snake_case_ : float = 0.9 , snake_case_ : float = 0.999 , snake_case_ : float = 1e-8 , snake_case_ : Optional[float] = None , snake_case_ : Optional[float] = None , snake_case_ : float = 0.0 , snake_case_ : float = 1.0 , snake_case_ : Optional[List[str]] = None , ):
'''simple docstring'''
UpperCAmelCase: int = tf.keras.optimizers.schedules.PolynomialDecay(
initial_learning_rate=snake_case_ , decay_steps=num_train_steps - num_warmup_steps , end_learning_rate=init_lr * min_lr_ratio , power=snake_case_ , )
if num_warmup_steps:
UpperCAmelCase: Dict = WarmUp(
initial_learning_rate=snake_case_ , decay_schedule_fn=snake_case_ , warmup_steps=snake_case_ , )
if weight_decay_rate > 0.0:
UpperCAmelCase: str = AdamWeightDecay(
learning_rate=snake_case_ , weight_decay_rate=snake_case_ , beta_a=snake_case_ , beta_a=snake_case_ , epsilon=snake_case_ , clipnorm=snake_case_ , global_clipnorm=snake_case_ , exclude_from_weight_decay=["LayerNorm", "layer_norm", "bias"] , include_in_weight_decay=snake_case_ , )
else:
UpperCAmelCase: Dict = tf.keras.optimizers.Adam(
learning_rate=snake_case_ , beta_a=snake_case_ , beta_a=snake_case_ , epsilon=snake_case_ , clipnorm=snake_case_ , global_clipnorm=snake_case_ , )
# We return the optimizer and the LR scheduler in order to better track the
# evolution of the LR independently of the optimizer.
return optimizer, lr_schedule
class __lowerCamelCase ( lowercase ):
def __init__( self , __snake_case = 0.0_01 , __snake_case = 0.9 , __snake_case = 0.9_99 , __snake_case = 1e-7 , __snake_case = False , __snake_case = 0.0 , __snake_case = None , __snake_case = None , __snake_case = "AdamWeightDecay" , **__snake_case , ) -> Optional[Any]:
"""simple docstring"""
super().__init__(__snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , **__snake_case )
UpperCAmelCase: Any = weight_decay_rate
UpperCAmelCase: Optional[int] = include_in_weight_decay
UpperCAmelCase: List[str] = exclude_from_weight_decay
@classmethod
def A__ ( cls , __snake_case ) -> List[str]:
"""simple docstring"""
UpperCAmelCase: Tuple = {"WarmUp": WarmUp}
return super(__snake_case , cls ).from_config(__snake_case , custom_objects=__snake_case )
def A__ ( self , __snake_case , __snake_case , __snake_case ) -> Union[str, Any]:
"""simple docstring"""
super(__snake_case , self )._prepare_local(__snake_case , __snake_case , __snake_case )
UpperCAmelCase: Union[str, Any] = tf.constant(
self.weight_decay_rate , name="adam_weight_decay_rate" )
def A__ ( self , __snake_case , __snake_case , __snake_case ) -> Tuple:
"""simple docstring"""
UpperCAmelCase: Any = self._do_use_weight_decay(var.name )
if do_decay:
return var.assign_sub(
learning_rate * var * apply_state[(var.device, var.dtype.base_dtype)]["weight_decay_rate"] , use_locking=self._use_locking , )
return tf.no_op()
def A__ ( self , __snake_case , __snake_case=None , **__snake_case ) -> List[str]:
"""simple docstring"""
UpperCAmelCase: Any = list(zip(*__snake_case ) )
return super(__snake_case , self ).apply_gradients(zip(__snake_case , __snake_case ) , name=__snake_case , **__snake_case )
def A__ ( self , __snake_case , __snake_case , __snake_case ) -> List[Any]:
"""simple docstring"""
if apply_state is None:
return self._decayed_lr_t[var_dtype], {}
UpperCAmelCase: Optional[Any] = apply_state or {}
UpperCAmelCase: Union[str, Any] = apply_state.get((var_device, var_dtype) )
if coefficients is None:
UpperCAmelCase: str = self._fallback_apply_state(__snake_case , __snake_case )
UpperCAmelCase: Any = coefficients
return coefficients["lr_t"], {"apply_state": apply_state}
def A__ ( self , __snake_case , __snake_case , __snake_case=None ) -> List[str]:
"""simple docstring"""
UpperCAmelCase: List[Any] = self._get_lr(var.device , var.dtype.base_dtype , __snake_case )
UpperCAmelCase: List[Any] = self._decay_weights_op(__snake_case , __snake_case , __snake_case )
with tf.control_dependencies([decay] ):
return super(__snake_case , self )._resource_apply_dense(__snake_case , __snake_case , **__snake_case )
def A__ ( self , __snake_case , __snake_case , __snake_case , __snake_case=None ) -> str:
"""simple docstring"""
UpperCAmelCase: Optional[int] = self._get_lr(var.device , var.dtype.base_dtype , __snake_case )
UpperCAmelCase: Optional[Any] = self._decay_weights_op(__snake_case , __snake_case , __snake_case )
with tf.control_dependencies([decay] ):
return super(__snake_case , self )._resource_apply_sparse(__snake_case , __snake_case , __snake_case , **__snake_case )
def A__ ( self ) -> Union[str, Any]:
"""simple docstring"""
UpperCAmelCase: Optional[int] = super().get_config()
config.update({"weight_decay_rate": self.weight_decay_rate} )
return config
def A__ ( self , __snake_case ) -> List[str]:
"""simple docstring"""
if self.weight_decay_rate == 0:
return False
if self._include_in_weight_decay:
for r in self._include_in_weight_decay:
if re.search(__snake_case , __snake_case ) is not None:
return True
if self._exclude_from_weight_decay:
for r in self._exclude_from_weight_decay:
if re.search(__snake_case , __snake_case ) is not None:
return False
return True
class __lowerCamelCase ( lowercase ):
def __init__( self ) -> int:
"""simple docstring"""
UpperCAmelCase: Union[str, Any] = []
UpperCAmelCase: Any = None
@property
def A__ ( self ) -> Dict:
"""simple docstring"""
if self._accum_steps is None:
UpperCAmelCase: Union[str, Any] = tf.Variable(
tf.constant(0 , dtype=tf.intaa ) , trainable=__snake_case , synchronization=tf.VariableSynchronization.ON_READ , aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA , )
return self._accum_steps.value()
@property
def A__ ( self ) -> List[str]:
"""simple docstring"""
if not self._gradients:
raise ValueError("The accumulator should be called first to initialize the gradients" )
return [gradient.value() if gradient is not None else gradient for gradient in self._gradients]
def __call__( self , __snake_case ) -> List[Any]:
"""simple docstring"""
if not self._gradients:
UpperCAmelCase: List[str] = self.step # Create the step variable.
self._gradients.extend(
[
tf.Variable(
tf.zeros_like(__snake_case ) , trainable=__snake_case , synchronization=tf.VariableSynchronization.ON_READ , aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA , )
if gradient is not None
else gradient
for gradient in gradients
] )
if len(__snake_case ) != len(self._gradients ):
raise ValueError(F'Expected {len(self._gradients )} gradients, but got {len(__snake_case )}' )
for accum_gradient, gradient in zip(self._gradients , __snake_case ):
if accum_gradient is not None and gradient is not None:
accum_gradient.assign_add(__snake_case )
self._accum_steps.assign_add(1 )
def A__ ( self ) -> Dict:
"""simple docstring"""
if not self._gradients:
return
self._accum_steps.assign(0 )
for gradient in self._gradients:
if gradient is not None:
gradient.assign(tf.zeros_like(__snake_case ) )
| 703
|
import inspect
import unittest
from transformers import MobileNetVaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileNetVaForImageClassification, MobileNetVaModel
from transformers.models.mobilenet_va.modeling_mobilenet_va import MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import MobileNetVaImageProcessor
class __lowerCamelCase ( lowercase ):
def A__ ( self ) -> List[str]:
"""simple docstring"""
UpperCAmelCase: Optional[Any] = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(__snake_case , "tf_padding" ) )
self.parent.assertTrue(hasattr(__snake_case , "depth_multiplier" ) )
class __lowerCamelCase :
def __init__( self , __snake_case , __snake_case=1_3 , __snake_case=3 , __snake_case=3_2 , __snake_case=0.25 , __snake_case=8 , __snake_case=True , __snake_case=1_0_2_4 , __snake_case=3_2 , __snake_case="relu6" , __snake_case=0.1 , __snake_case=0.02 , __snake_case=True , __snake_case=True , __snake_case=1_0 , __snake_case=None , ) -> Tuple:
"""simple docstring"""
UpperCAmelCase: Dict = parent
UpperCAmelCase: Optional[int] = batch_size
UpperCAmelCase: Optional[int] = num_channels
UpperCAmelCase: List[str] = image_size
UpperCAmelCase: Optional[Any] = depth_multiplier
UpperCAmelCase: List[Any] = min_depth
UpperCAmelCase: Optional[Any] = tf_padding
UpperCAmelCase: List[Any] = int(last_hidden_size * depth_multiplier )
UpperCAmelCase: Dict = output_stride
UpperCAmelCase: List[Any] = hidden_act
UpperCAmelCase: Union[str, Any] = classifier_dropout_prob
UpperCAmelCase: Tuple = use_labels
UpperCAmelCase: Tuple = is_training
UpperCAmelCase: int = num_labels
UpperCAmelCase: Union[str, Any] = initializer_range
UpperCAmelCase: Optional[Any] = scope
def A__ ( self ) -> List[Any]:
"""simple docstring"""
UpperCAmelCase: Optional[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCAmelCase: List[Any] = None
UpperCAmelCase: Optional[Any] = None
if self.use_labels:
UpperCAmelCase: int = ids_tensor([self.batch_size] , self.num_labels )
UpperCAmelCase: Any = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
UpperCAmelCase: Tuple = self.get_config()
return config, pixel_values, labels, pixel_labels
def A__ ( self ) -> Dict:
"""simple docstring"""
return MobileNetVaConfig(
num_channels=self.num_channels , image_size=self.image_size , depth_multiplier=self.depth_multiplier , min_depth=self.min_depth , tf_padding=self.tf_padding , hidden_act=self.hidden_act , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , )
def A__ ( self , __snake_case , __snake_case , __snake_case , __snake_case ) -> str:
"""simple docstring"""
UpperCAmelCase: List[Any] = MobileNetVaModel(config=__snake_case )
model.to(__snake_case )
model.eval()
UpperCAmelCase: Optional[int] = model(__snake_case )
self.parent.assertEqual(
result.last_hidden_state.shape , (
self.batch_size,
self.last_hidden_size,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def A__ ( self , __snake_case , __snake_case , __snake_case , __snake_case ) -> Dict:
"""simple docstring"""
UpperCAmelCase: Dict = self.num_labels
UpperCAmelCase: Optional[int] = MobileNetVaForImageClassification(__snake_case )
model.to(__snake_case )
model.eval()
UpperCAmelCase: List[str] = model(__snake_case , labels=__snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def A__ ( self ) -> Any:
"""simple docstring"""
UpperCAmelCase: int = self.prepare_config_and_inputs()
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase: Union[str, Any] = config_and_inputs
UpperCAmelCase: Union[str, Any] = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class __lowerCamelCase ( lowercase , lowercase , unittest.TestCase ):
lowerCamelCase__: List[str] = (MobileNetVaModel, MobileNetVaForImageClassification) if is_torch_available() else ()
lowerCamelCase__: Union[str, Any] = (
{'''feature-extraction''': MobileNetVaModel, '''image-classification''': MobileNetVaForImageClassification}
if is_torch_available()
else {}
)
lowerCamelCase__: Optional[int] = False
lowerCamelCase__: Any = False
lowerCamelCase__: Optional[int] = False
lowerCamelCase__: Any = False
def A__ ( self ) -> Tuple:
"""simple docstring"""
UpperCAmelCase: List[Any] = MobileNetVaModelTester(self )
UpperCAmelCase: str = MobileNetVaConfigTester(self , config_class=__snake_case , has_text_modality=__snake_case )
def A__ ( self ) -> int:
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason="MobileNetV1 does not use inputs_embeds" )
def A__ ( self ) -> str:
"""simple docstring"""
pass
@unittest.skip(reason="MobileNetV1 does not support input and output embeddings" )
def A__ ( self ) -> Optional[Any]:
"""simple docstring"""
pass
@unittest.skip(reason="MobileNetV1 does not output attentions" )
def A__ ( self ) -> Tuple:
"""simple docstring"""
pass
def A__ ( self ) -> Dict:
"""simple docstring"""
UpperCAmelCase , UpperCAmelCase: Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase: List[str] = model_class(__snake_case )
UpperCAmelCase: List[Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase: str = [*signature.parameters.keys()]
UpperCAmelCase: Optional[Any] = ["pixel_values"]
self.assertListEqual(arg_names[:1] , __snake_case )
def A__ ( self ) -> Optional[Any]:
"""simple docstring"""
UpperCAmelCase: List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__snake_case )
def A__ ( self ) -> Tuple:
"""simple docstring"""
def check_hidden_states_output(__snake_case , __snake_case , __snake_case ):
UpperCAmelCase: Any = model_class(__snake_case )
model.to(__snake_case )
model.eval()
with torch.no_grad():
UpperCAmelCase: Tuple = model(**self._prepare_for_class(__snake_case , __snake_case ) )
UpperCAmelCase: int = outputs.hidden_states
UpperCAmelCase: Dict = 2_6
self.assertEqual(len(__snake_case ) , __snake_case )
UpperCAmelCase , UpperCAmelCase: Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase: Tuple = True
check_hidden_states_output(__snake_case , __snake_case , __snake_case )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
UpperCAmelCase: Union[str, Any] = True
check_hidden_states_output(__snake_case , __snake_case , __snake_case )
def A__ ( self ) -> List[str]:
"""simple docstring"""
UpperCAmelCase: Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__snake_case )
@slow
def A__ ( self ) -> Tuple:
"""simple docstring"""
for model_name in MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase: Dict = MobileNetVaModel.from_pretrained(__snake_case )
self.assertIsNotNone(__snake_case )
def __UpperCAmelCase ( ):
'''simple docstring'''
UpperCAmelCase: Dict = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
class __lowerCamelCase ( unittest.TestCase ):
@cached_property
def A__ ( self ) -> Union[str, Any]:
"""simple docstring"""
return (
MobileNetVaImageProcessor.from_pretrained("google/mobilenet_v1_1.0_224" ) if is_vision_available() else None
)
@slow
def A__ ( self ) -> List[str]:
"""simple docstring"""
UpperCAmelCase: Any = MobileNetVaForImageClassification.from_pretrained("google/mobilenet_v1_1.0_224" ).to(__snake_case )
UpperCAmelCase: int = self.default_image_processor
UpperCAmelCase: Dict = prepare_img()
UpperCAmelCase: List[str] = image_processor(images=__snake_case , return_tensors="pt" ).to(__snake_case )
# forward pass
with torch.no_grad():
UpperCAmelCase: List[str] = model(**__snake_case )
# verify the logits
UpperCAmelCase: Dict = torch.Size((1, 1_0_0_1) )
self.assertEqual(outputs.logits.shape , __snake_case )
UpperCAmelCase: Optional[int] = torch.tensor([-4.17_39, -1.12_33, 3.12_05] ).to(__snake_case )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , __snake_case , atol=1e-4 ) )
| 166
| 0
|
"""simple docstring"""
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_a : List[Any] = logging.get_logger(__name__)
_a : Dict = {
'microsoft/unispeech-sat-base-100h-libri-ft': (
'https://huggingface.co/microsoft/unispeech-sat-base-100h-libri-ft/resolve/main/config.json'
),
# See all UniSpeechSat models at https://huggingface.co/models?filter=unispeech_sat
}
class __A ( SCREAMING_SNAKE_CASE_ ):
_UpperCamelCase : Dict = "unispeech-sat"
def __init__( self , a__=32 , a__=768 , a__=12 , a__=12 , a__=3072 , a__="gelu" , a__=0.1 , a__=0.1 , a__=0.1 , a__=0.0 , a__=0.0 , a__=0.1 , a__=0.1 , a__=0.0_2 , a__=1e-5 , a__="group" , a__="gelu" , a__=(512, 512, 512, 512, 512, 512, 512) , a__=(5, 2, 2, 2, 2, 2, 2) , a__=(10, 3, 3, 3, 3, 2, 2) , a__=False , a__=128 , a__=16 , a__=False , a__=True , a__=0.0_5 , a__=10 , a__=2 , a__=0.0 , a__=10 , a__=0 , a__=320 , a__=2 , a__=0.1 , a__=100 , a__=256 , a__=256 , a__=0.1 , a__="mean" , a__=False , a__=False , a__=256 , a__=(512, 512, 512, 512, 1500) , a__=(5, 3, 3, 1, 1) , a__=(1, 2, 3, 1, 1) , a__=512 , a__=0 , a__=1 , a__=2 , a__=504 , **a__ , ):
super().__init__(**a__ , pad_token_id=a__ , bos_token_id=a__ , eos_token_id=a__ )
_lowerCAmelCase : Any = hidden_size
_lowerCAmelCase : int = feat_extract_norm
_lowerCAmelCase : Any = feat_extract_activation
_lowerCAmelCase : List[Any] = list(a__ )
_lowerCAmelCase : List[str] = list(a__ )
_lowerCAmelCase : Dict = list(a__ )
_lowerCAmelCase : str = conv_bias
_lowerCAmelCase : Optional[Any] = num_conv_pos_embeddings
_lowerCAmelCase : Union[str, Any] = num_conv_pos_embedding_groups
_lowerCAmelCase : int = len(self.conv_dim )
_lowerCAmelCase : Optional[Any] = num_hidden_layers
_lowerCAmelCase : int = intermediate_size
_lowerCAmelCase : Tuple = hidden_act
_lowerCAmelCase : Dict = num_attention_heads
_lowerCAmelCase : str = hidden_dropout
_lowerCAmelCase : Any = attention_dropout
_lowerCAmelCase : Optional[Any] = activation_dropout
_lowerCAmelCase : Dict = feat_proj_dropout
_lowerCAmelCase : List[str] = final_dropout
_lowerCAmelCase : Union[str, Any] = layerdrop
_lowerCAmelCase : Union[str, Any] = layer_norm_eps
_lowerCAmelCase : str = initializer_range
_lowerCAmelCase : List[Any] = vocab_size
_lowerCAmelCase : str = num_clusters
_lowerCAmelCase : Optional[Any] = do_stable_layer_norm
_lowerCAmelCase : Optional[Any] = use_weighted_layer_sum
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
"""Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="""
""" `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="""
F" {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,"
F" `len(config.conv_kernel) = {len(self.conv_kernel )}`." )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
_lowerCAmelCase : Tuple = apply_spec_augment
_lowerCAmelCase : Optional[Any] = mask_time_prob
_lowerCAmelCase : List[Any] = mask_time_length
_lowerCAmelCase : List[Any] = mask_time_min_masks
_lowerCAmelCase : Optional[Any] = mask_feature_prob
_lowerCAmelCase : str = mask_feature_length
_lowerCAmelCase : Any = mask_feature_min_masks
# parameters for pretraining with codevector quantized representations
_lowerCAmelCase : int = num_codevectors_per_group
_lowerCAmelCase : Tuple = num_codevector_groups
_lowerCAmelCase : str = contrastive_logits_temperature
_lowerCAmelCase : Optional[int] = feat_quantizer_dropout
_lowerCAmelCase : Any = num_negatives
_lowerCAmelCase : Optional[int] = codevector_dim
_lowerCAmelCase : List[Any] = proj_codevector_dim
_lowerCAmelCase : Optional[int] = diversity_loss_weight
# ctc loss
_lowerCAmelCase : Union[str, Any] = ctc_loss_reduction
_lowerCAmelCase : List[str] = ctc_zero_infinity
# SequenceClassification-specific parameter. Feel free to ignore for other classes.
_lowerCAmelCase : Optional[int] = classifier_proj_size
# XVector-specific parameters. Feel free to ignore for other classes.
_lowerCAmelCase : int = list(a__ )
_lowerCAmelCase : List[Any] = list(a__ )
_lowerCAmelCase : Union[str, Any] = list(a__ )
_lowerCAmelCase : List[str] = xvector_output_dim
@property
def __A ( self ):
return functools.reduce(operator.mul , self.conv_stride , 1 )
| 213
|
"""simple docstring"""
import unittest
from transformers import GPTSwaTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
_a : int = get_tests_dir('fixtures/test_sentencepiece_with_bytefallback.model')
@require_sentencepiece
@require_tokenizers
class __A ( SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
_UpperCamelCase : str = GPTSwaTokenizer
_UpperCamelCase : Tuple = False
_UpperCamelCase : Union[str, Any] = True
_UpperCamelCase : Union[str, Any] = False
def __A ( self ):
super().setUp()
# We have a SentencePiece fixture for testing
_lowerCAmelCase : Any = GPTSwaTokenizer(a__ , eos_token="""<unk>""" , bos_token="""<unk>""" , pad_token="""<unk>""" )
tokenizer.save_pretrained(self.tmpdirname )
def __A ( self , a__ ):
_lowerCAmelCase : Optional[int] = """This is a test"""
_lowerCAmelCase : Optional[int] = """This is a test"""
return input_text, output_text
def __A ( self ):
_lowerCAmelCase : List[Any] = """<s>"""
_lowerCAmelCase : Any = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(a__ ) , a__ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(a__ ) , a__ )
def __A ( self ):
_lowerCAmelCase : Any = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , """<unk>""" )
self.assertEqual(vocab_keys[1] , """<s>""" )
self.assertEqual(vocab_keys[-1] , """j""" )
self.assertEqual(len(a__ ) , 2000 )
def __A ( self ):
self.assertEqual(self.get_tokenizer().vocab_size , 2000 )
def __A ( self ):
_lowerCAmelCase : Any = GPTSwaTokenizer(a__ )
_lowerCAmelCase : Optional[int] = tokenizer.tokenize("""This is a test""" )
self.assertListEqual(a__ , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(a__ ) , [465, 287, 265, 631, 842] )
_lowerCAmelCase : Tuple = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
# fmt: off
self.assertListEqual(
a__ , ["""▁I""", """▁was""", """▁bor""", """n""", """▁in""", """▁""", """<0x39>""", """2""", """0""", """0""", """0""", """,""", """▁and""", """▁this""", """▁is""", """▁f""", """al""", """s""", """<0xC3>""", """<0xA9>""", """."""] , )
# fmt: on
_lowerCAmelCase : List[str] = tokenizer.convert_tokens_to_ids(a__ )
self.assertListEqual(
a__ , [262, 272, 1525, 286, 271, 268, 60, 916, 633, 633, 633, 259, 266, 301, 287, 384, 367, 263, 198, 172, 260] , )
_lowerCAmelCase : Union[str, Any] = tokenizer.convert_ids_to_tokens(a__ )
# fmt: off
self.assertListEqual(
a__ , ["""▁I""", """▁was""", """▁bor""", """n""", """▁in""", """▁""", """<0x39>""", """2""", """0""", """0""", """0""", """,""", """▁and""", """▁this""", """▁is""", """▁f""", """al""", """s""", """<0xC3>""", """<0xA9>""", """."""] )
# fmt: on
def __A ( self ):
_lowerCAmelCase : Optional[Any] = GPTSwaTokenizer(a__ )
_lowerCAmelCase : str = ["""This is a test""", """I was born in 92000, and this is falsé."""]
_lowerCAmelCase : List[Any] = [
[465, 287, 265, 631, 842],
[262, 272, 1525, 286, 271, 268, 60, 916, 633, 633, 633, 259, 266, 301, 287, 384, 367, 263, 198, 172, 260],
]
# Test that encode_fast returns the same as tokenize + convert_tokens_to_ids
for text, expected_ids in zip(a__ , a__ ):
self.assertListEqual(tokenizer.encode_fast(a__ ) , a__ )
# Test that decode_fast returns the input text
for text, token_ids in zip(a__ , a__ ):
self.assertEqual(tokenizer.decode_fast(a__ ) , a__ )
@slow
def __A ( self ):
_lowerCAmelCase : str = [
"""<|python|>def fibonacci(n)\n if n < 0:\n print('Incorrect input')""",
"""Hey there, how are you doing this fine day?""",
"""This is a text with a trailing spaces followed by a dot .""",
"""Häj sväjs lillebrör! =)""",
"""Det är inget fel på Mr. Cool""",
]
# fmt: off
_lowerCAmelCase : List[Any] = {"""input_ids""": [[63423, 5, 6811, 14954, 282, 816, 3821, 63466, 63425, 63462, 18, 63978, 678, 301, 1320, 63423, 63455, 63458, 18, 63982, 4246, 3940, 1901, 47789, 5547, 18994], [19630, 1100, 63446, 1342, 633, 544, 4488, 593, 5102, 2416, 63495, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1652, 428, 268, 1936, 515, 268, 58593, 22413, 9106, 546, 268, 33213, 63979, 698, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [55130, 63450, 924, 63449, 2249, 4062, 1558, 318, 63504, 21498, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [509, 377, 2827, 2559, 332, 6575, 63443, 26801, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """token_type_ids""": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]}
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=a__ , model_name="""AI-Sweden/gpt-sw3-126m""" , sequences=a__ , )
| 213
| 1
|
import itertools
import json
import os
import unittest
from transformers import AddedToken, LongformerTokenizer, LongformerTokenizerFast
from transformers.models.longformer.tokenization_longformer import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class UpperCamelCase_ ( UpperCamelCase , unittest.TestCase ):
lowercase = LongformerTokenizer
lowercase = True
lowercase = LongformerTokenizerFast
lowercase = True
def snake_case__( self ) -> Any:
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
_a : Optional[Any] = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''\u0120''',
'''\u0120l''',
'''\u0120n''',
'''\u0120lo''',
'''\u0120low''',
'''er''',
'''\u0120lowest''',
'''\u0120newer''',
'''\u0120wider''',
'''<unk>''',
]
_a : List[str] = dict(zip(lowercase , range(len(lowercase ) ) ) )
_a : int = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', '''''']
_a : Optional[Any] = {'''unk_token''': '''<unk>'''}
_a : Union[str, Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
_a : Union[str, Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(lowercase ) + '''\n''' )
with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write('''\n'''.join(lowercase ) )
def snake_case__( self , **lowercase ) -> List[Any]:
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname , **lowercase )
def snake_case__( self , **lowercase ) -> List[Any]:
kwargs.update(self.special_tokens_map )
return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **lowercase )
def snake_case__( self , lowercase ) -> int:
_a : Optional[int] = '''lower newer'''
_a : List[Any] = '''lower newer'''
return input_text, output_text
def snake_case__( self ) -> List[str]:
_a : List[str] = self.tokenizer_class(self.vocab_file , self.merges_file , **self.special_tokens_map )
_a : Union[str, Any] = '''lower newer'''
_a : Tuple = ['''l''', '''o''', '''w''', '''er''', '''\u0120''', '''n''', '''e''', '''w''', '''er''']
_a : Any = tokenizer.tokenize(lowercase ) # , add_prefix_space=True)
self.assertListEqual(lowercase , lowercase )
_a : Union[str, Any] = tokens + [tokenizer.unk_token]
_a : Any = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowercase ) , lowercase )
def snake_case__( self ) -> Dict:
_a : str = self.get_tokenizer()
self.assertListEqual(tokenizer.encode('''Hello world!''' , add_special_tokens=lowercase ) , [0, 31_414, 232, 328, 2] )
self.assertListEqual(
tokenizer.encode('''Hello world! cécé herlolip 418''' , add_special_tokens=lowercase ) , [0, 31_414, 232, 328, 740, 1_140, 12_695, 69, 46_078, 1_588, 2] , )
@slow
def snake_case__( self ) -> List[Any]:
_a : Tuple = self.tokenizer_class.from_pretrained('''allenai/longformer-base-4096''' )
_a : Union[str, Any] = tokenizer.encode('''sequence builders''' , add_special_tokens=lowercase )
_a : int = tokenizer.encode('''multi-sequence build''' , add_special_tokens=lowercase )
_a : List[Any] = tokenizer.encode(
'''sequence builders''' , add_special_tokens=lowercase , add_prefix_space=lowercase )
_a : int = tokenizer.encode(
'''sequence builders''' , '''multi-sequence build''' , add_special_tokens=lowercase , add_prefix_space=lowercase )
_a : Tuple = tokenizer.build_inputs_with_special_tokens(lowercase )
_a : List[str] = tokenizer.build_inputs_with_special_tokens(lowercase , lowercase )
assert encoded_sentence == encoded_text_from_decode
assert encoded_pair == encoded_pair_from_decode
def snake_case__( self ) -> Optional[Any]:
_a : str = self.get_tokenizer()
_a : Union[str, Any] = '''Encode this sequence.'''
_a : int = tokenizer.byte_encoder[''' '''.encode('''utf-8''' )[0]]
# Testing encoder arguments
_a : Optional[int] = tokenizer.encode(lowercase , add_special_tokens=lowercase , add_prefix_space=lowercase )
_a : Dict = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertNotEqual(lowercase , lowercase )
_a : Tuple = tokenizer.encode(lowercase , add_special_tokens=lowercase , add_prefix_space=lowercase )
_a : Union[str, Any] = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertEqual(lowercase , lowercase )
tokenizer.add_special_tokens({'''bos_token''': '''<s>'''} )
_a : List[Any] = tokenizer.encode(lowercase , add_special_tokens=lowercase )
_a : Tuple = tokenizer.convert_ids_to_tokens(encoded[1] )[0]
self.assertNotEqual(lowercase , lowercase )
# Testing spaces after special tokens
_a : Dict = '''<mask>'''
tokenizer.add_special_tokens(
{'''mask_token''': AddedToken(lowercase , lstrip=lowercase , rstrip=lowercase )} ) # mask token has a left space
_a : Union[str, Any] = tokenizer.convert_tokens_to_ids(lowercase )
_a : int = '''Encode <mask> sequence'''
_a : Union[str, Any] = '''Encode <mask>sequence'''
_a : Any = tokenizer.encode(lowercase )
_a : List[str] = encoded.index(lowercase )
_a : Optional[int] = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertEqual(lowercase , lowercase )
_a : Any = tokenizer.encode(lowercase )
_a : str = encoded.index(lowercase )
_a : Optional[Any] = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertNotEqual(lowercase , lowercase )
def snake_case__( self ) -> Tuple:
pass
def snake_case__( self ) -> List[Any]:
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'{tokenizer.__class__.__name__} ({pretrained_name})' ):
_a : str = self.rust_tokenizer_class.from_pretrained(lowercase , **lowercase )
_a : Tuple = self.tokenizer_class.from_pretrained(lowercase , **lowercase )
_a : Optional[Any] = '''A, <mask> AllenNLP sentence.'''
_a : List[Any] = tokenizer_r.encode_plus(lowercase , add_special_tokens=lowercase , return_token_type_ids=lowercase )
_a : Dict = tokenizer_p.encode_plus(lowercase , add_special_tokens=lowercase , return_token_type_ids=lowercase )
# token_type_ids should put 0 everywhere
self.assertEqual(sum(tokens_r['''token_type_ids'''] ) , sum(tokens_p['''token_type_ids'''] ) )
# attention_mask should put 1 everywhere, so sum over length should be 1
self.assertEqual(
sum(tokens_r['''attention_mask'''] ) / len(tokens_r['''attention_mask'''] ) , sum(tokens_p['''attention_mask'''] ) / len(tokens_p['''attention_mask'''] ) , )
_a : List[str] = tokenizer_r.convert_ids_to_tokens(tokens_r['''input_ids'''] )
_a : str = tokenizer_p.convert_ids_to_tokens(tokens_p['''input_ids'''] )
# Rust correctly handles the space before the mask while python doesnt
self.assertSequenceEqual(tokens_p['''input_ids'''] , [0, 250, 6, 50_264, 3_823, 487, 21_992, 3_645, 4, 2] )
self.assertSequenceEqual(tokens_r['''input_ids'''] , [0, 250, 6, 50_264, 3_823, 487, 21_992, 3_645, 4, 2] )
self.assertSequenceEqual(
lowercase , ['''<s>''', '''A''', ''',''', '''<mask>''', '''ĠAllen''', '''N''', '''LP''', '''Ġsentence''', '''.''', '''</s>'''] )
self.assertSequenceEqual(
lowercase , ['''<s>''', '''A''', ''',''', '''<mask>''', '''ĠAllen''', '''N''', '''LP''', '''Ġsentence''', '''.''', '''</s>'''] )
def snake_case__( self ) -> int:
for trim_offsets, add_prefix_space in itertools.product([True, False] , repeat=2 ):
_a : int = self.rust_tokenizer_class.from_pretrained(
self.tmpdirname , use_fast=lowercase , add_prefix_space=lowercase , trim_offsets=lowercase )
_a : Optional[Any] = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__() )
_a : Dict = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__() )
self.assertEqual(pre_tokenizer_state['''add_prefix_space'''] , lowercase )
self.assertEqual(post_processor_state['''add_prefix_space'''] , lowercase )
self.assertEqual(post_processor_state['''trim_offsets'''] , lowercase )
def snake_case__( self ) -> Tuple:
# Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space` and
# `trim_offsets`
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'{tokenizer.__class__.__name__} ({pretrained_name})' ):
_a : Any = '''hello''' # `hello` is a token in the vocabulary of `pretrained_name`
_a : int = F'{text_of_1_token} {text_of_1_token}'
_a : List[Any] = self.rust_tokenizer_class.from_pretrained(
lowercase , use_fast=lowercase , add_prefix_space=lowercase , trim_offsets=lowercase )
_a : int = tokenizer_r(lowercase , return_offsets_mapping=lowercase , add_special_tokens=lowercase )
self.assertEqual(encoding.offset_mapping[0] , (0, len(lowercase )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(lowercase ) + 1, len(lowercase ) + 1 + len(lowercase )) , )
_a : int = self.rust_tokenizer_class.from_pretrained(
lowercase , use_fast=lowercase , add_prefix_space=lowercase , trim_offsets=lowercase )
_a : Dict = tokenizer_r(lowercase , return_offsets_mapping=lowercase , add_special_tokens=lowercase )
self.assertEqual(encoding.offset_mapping[0] , (0, len(lowercase )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(lowercase ) + 1, len(lowercase ) + 1 + len(lowercase )) , )
_a : Dict = self.rust_tokenizer_class.from_pretrained(
lowercase , use_fast=lowercase , add_prefix_space=lowercase , trim_offsets=lowercase )
_a : Union[str, Any] = tokenizer_r(lowercase , return_offsets_mapping=lowercase , add_special_tokens=lowercase )
self.assertEqual(encoding.offset_mapping[0] , (0, len(lowercase )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(lowercase ), len(lowercase ) + 1 + len(lowercase )) , )
_a : Dict = self.rust_tokenizer_class.from_pretrained(
lowercase , use_fast=lowercase , add_prefix_space=lowercase , trim_offsets=lowercase )
_a : List[str] = tokenizer_r(lowercase , return_offsets_mapping=lowercase , add_special_tokens=lowercase )
self.assertEqual(encoding.offset_mapping[0] , (0, len(lowercase )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(lowercase ), len(lowercase ) + 1 + len(lowercase )) , )
_a : str = F' {text}'
# tokenizer_r = self.rust_tokenizer_class.from_pretrained(
# pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
# )
# encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
# self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
# self.assertEqual(
# encoding.offset_mapping[1],
# (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
# )
_a : Union[str, Any] = self.rust_tokenizer_class.from_pretrained(
lowercase , use_fast=lowercase , add_prefix_space=lowercase , trim_offsets=lowercase )
_a : List[Any] = tokenizer_r(lowercase , return_offsets_mapping=lowercase , add_special_tokens=lowercase )
self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(lowercase )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(lowercase ) + 1, 1 + len(lowercase ) + 1 + len(lowercase )) , )
_a : List[Any] = self.rust_tokenizer_class.from_pretrained(
lowercase , use_fast=lowercase , add_prefix_space=lowercase , trim_offsets=lowercase )
_a : Union[str, Any] = tokenizer_r(lowercase , return_offsets_mapping=lowercase , add_special_tokens=lowercase )
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(lowercase )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(lowercase ), 1 + len(lowercase ) + 1 + len(lowercase )) , )
_a : str = self.rust_tokenizer_class.from_pretrained(
lowercase , use_fast=lowercase , add_prefix_space=lowercase , trim_offsets=lowercase )
_a : Union[str, Any] = tokenizer_r(lowercase , return_offsets_mapping=lowercase , add_special_tokens=lowercase )
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(lowercase )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(lowercase ), 1 + len(lowercase ) + 1 + len(lowercase )) , )
| 715
|
import torch
from diffusers import CMStochasticIterativeScheduler
from .test_schedulers import SchedulerCommonTest
class UpperCamelCase_ ( UpperCamelCase ):
lowercase = (CMStochasticIterativeScheduler,)
lowercase = 10
def snake_case__( self , **lowercase ) -> Tuple:
_a : Dict = {
'''num_train_timesteps''': 201,
'''sigma_min''': 0.002,
'''sigma_max''': 80.0,
}
config.update(**lowercase )
return config
def snake_case__( self ) -> Dict:
_a : Union[str, Any] = 10
_a : List[Any] = self.get_scheduler_config()
_a : Union[str, Any] = self.scheduler_classes[0](**lowercase )
scheduler.set_timesteps(lowercase )
_a : List[str] = scheduler.timesteps[0]
_a : Union[str, Any] = scheduler.timesteps[1]
_a : Dict = self.dummy_sample
_a : str = 0.1 * sample
_a : int = scheduler.step(lowercase , lowercase , lowercase ).prev_sample
_a : Dict = scheduler.step(lowercase , lowercase , lowercase ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
def snake_case__( self ) -> Tuple:
for timesteps in [10, 50, 100, 1_000]:
self.check_over_configs(num_train_timesteps=lowercase )
def snake_case__( self ) -> Tuple:
for clip_denoised in [True, False]:
self.check_over_configs(clip_denoised=lowercase )
def snake_case__( self ) -> str:
_a : Tuple = self.scheduler_classes[0]
_a : Any = self.get_scheduler_config()
_a : List[Any] = scheduler_class(**lowercase )
_a : str = 1
scheduler.set_timesteps(lowercase )
_a : List[Any] = scheduler.timesteps
_a : List[str] = torch.manual_seed(0 )
_a : Tuple = self.dummy_model()
_a : Dict = self.dummy_sample_deter * scheduler.init_noise_sigma
for i, t in enumerate(lowercase ):
# 1. scale model input
_a : Optional[int] = scheduler.scale_model_input(lowercase , lowercase )
# 2. predict noise residual
_a : Tuple = model(lowercase , lowercase )
# 3. predict previous sample x_t-1
_a : Optional[int] = scheduler.step(lowercase , lowercase , lowercase , generator=lowercase ).prev_sample
_a : Union[str, Any] = pred_prev_sample
_a : str = torch.sum(torch.abs(lowercase ) )
_a : int = torch.mean(torch.abs(lowercase ) )
assert abs(result_sum.item() - 192.7614 ) < 1e-2
assert abs(result_mean.item() - 0.2510 ) < 1e-3
def snake_case__( self ) -> List[Any]:
_a : Any = self.scheduler_classes[0]
_a : Optional[int] = self.get_scheduler_config()
_a : List[Any] = scheduler_class(**lowercase )
_a : Any = [106, 0]
scheduler.set_timesteps(timesteps=lowercase )
_a : List[Any] = scheduler.timesteps
_a : Optional[int] = torch.manual_seed(0 )
_a : int = self.dummy_model()
_a : str = self.dummy_sample_deter * scheduler.init_noise_sigma
for t in timesteps:
# 1. scale model input
_a : Optional[int] = scheduler.scale_model_input(lowercase , lowercase )
# 2. predict noise residual
_a : Union[str, Any] = model(lowercase , lowercase )
# 3. predict previous sample x_t-1
_a : Dict = scheduler.step(lowercase , lowercase , lowercase , generator=lowercase ).prev_sample
_a : int = pred_prev_sample
_a : List[Any] = torch.sum(torch.abs(lowercase ) )
_a : Optional[int] = torch.mean(torch.abs(lowercase ) )
assert abs(result_sum.item() - 347.6357 ) < 1e-2
assert abs(result_mean.item() - 0.4527 ) < 1e-3
def snake_case__( self ) -> Optional[int]:
_a : Optional[int] = self.scheduler_classes[0]
_a : Union[str, Any] = self.get_scheduler_config()
_a : Optional[Any] = scheduler_class(**lowercase )
_a : List[str] = [39, 30, 12, 15, 0]
with self.assertRaises(lowercase , msg='''`timesteps` must be in descending order.''' ):
scheduler.set_timesteps(timesteps=lowercase )
def snake_case__( self ) -> str:
_a : Union[str, Any] = self.scheduler_classes[0]
_a : Tuple = self.get_scheduler_config()
_a : Tuple = scheduler_class(**lowercase )
_a : Any = [39, 30, 12, 1, 0]
_a : Optional[Any] = len(lowercase )
with self.assertRaises(lowercase , msg='''Can only pass one of `num_inference_steps` or `timesteps`.''' ):
scheduler.set_timesteps(num_inference_steps=lowercase , timesteps=lowercase )
def snake_case__( self ) -> List[str]:
_a : Optional[Any] = self.scheduler_classes[0]
_a : List[Any] = self.get_scheduler_config()
_a : Optional[int] = scheduler_class(**lowercase )
_a : Any = [scheduler.config.num_train_timesteps]
with self.assertRaises(
lowercase , msg='''`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}}''' , ):
scheduler.set_timesteps(timesteps=lowercase )
| 307
| 0
|
"""simple docstring"""
import json
import os
import unittest
from transformers import BatchEncoding, MvpTokenizer, MvpTokenizerFast
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin, filter_roberta_detectors
@require_tokenizers
class __A ( A_ ,unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase : str = MvpTokenizer
lowerCAmelCase : Tuple = MvpTokenizerFast
lowerCAmelCase : List[str] = True
lowerCAmelCase : Tuple = filter_roberta_detectors
def UpperCAmelCase ( self : Any ) -> Optional[Any]:
"""simple docstring"""
super().setUp()
lowercase__ : int = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''\u0120''',
'''\u0120l''',
'''\u0120n''',
'''\u0120lo''',
'''\u0120low''',
'''er''',
'''\u0120lowest''',
'''\u0120newer''',
'''\u0120wider''',
'''<unk>''',
]
lowercase__ : List[str] = dict(zip(_snake_case ,range(len(_snake_case ) ) ) )
lowercase__ : Tuple = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', '''''']
lowercase__ : int = {'''unk_token''': '''<unk>'''}
lowercase__ : Dict = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES['''vocab_file'''] )
lowercase__ : List[str] = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file ,'''w''' ,encoding='''utf-8''' ) as fp:
fp.write(json.dumps(_snake_case ) + '''\n''' )
with open(self.merges_file ,'''w''' ,encoding='''utf-8''' ) as fp:
fp.write('''\n'''.join(_snake_case ) )
def UpperCAmelCase ( self : Optional[int] ,**_snake_case : List[str] ) -> int:
"""simple docstring"""
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname ,**_snake_case )
def UpperCAmelCase ( self : Tuple ,**_snake_case : Tuple ) -> List[Any]:
"""simple docstring"""
kwargs.update(self.special_tokens_map )
return self.rust_tokenizer_class.from_pretrained(self.tmpdirname ,**_snake_case )
def UpperCAmelCase ( self : Dict ,_snake_case : Optional[Any] ) -> Tuple:
"""simple docstring"""
return "lower newer", "lower newer"
@cached_property
def UpperCAmelCase ( self : str ) -> Optional[Any]:
"""simple docstring"""
return MvpTokenizer.from_pretrained('''RUCAIBox/mvp''' )
@cached_property
def UpperCAmelCase ( self : Optional[Any] ) -> int:
"""simple docstring"""
return MvpTokenizerFast.from_pretrained('''RUCAIBox/mvp''' )
@require_torch
def UpperCAmelCase ( self : str ) -> str:
"""simple docstring"""
lowercase__ : List[str] = ['''A long paragraph for summarization.''', '''Another paragraph for summarization.''']
lowercase__ : Dict = [0, 250, 251, 17_818, 13, 39_186, 1_938, 4, 2]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
lowercase__ : List[str] = tokenizer(_snake_case ,max_length=len(_snake_case ) ,padding=_snake_case ,return_tensors='''pt''' )
self.assertIsInstance(_snake_case ,_snake_case )
self.assertEqual((2, 9) ,batch.input_ids.shape )
self.assertEqual((2, 9) ,batch.attention_mask.shape )
lowercase__ : str = batch.input_ids.tolist()[0]
self.assertListEqual(_snake_case ,_snake_case )
# Test that special tokens are reset
@require_torch
def UpperCAmelCase ( self : Optional[Any] ) -> int:
"""simple docstring"""
lowercase__ : Union[str, Any] = ['''A long paragraph for summarization.''', '''Another paragraph for summarization.''']
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
lowercase__ : Optional[Any] = tokenizer(_snake_case ,padding=_snake_case ,return_tensors='''pt''' )
# check if input_ids are returned and no labels
self.assertIn('''input_ids''' ,_snake_case )
self.assertIn('''attention_mask''' ,_snake_case )
self.assertNotIn('''labels''' ,_snake_case )
self.assertNotIn('''decoder_attention_mask''' ,_snake_case )
@require_torch
def UpperCAmelCase ( self : Optional[Any] ) -> List[str]:
"""simple docstring"""
lowercase__ : Union[str, Any] = [
'''Summary of the text.''',
'''Another summary.''',
]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
lowercase__ : Optional[int] = tokenizer(text_target=_snake_case ,max_length=32 ,padding='''max_length''' ,return_tensors='''pt''' )
self.assertEqual(32 ,targets['''input_ids'''].shape[1] )
@require_torch
def UpperCAmelCase ( self : List[Any] ) -> List[Any]:
"""simple docstring"""
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
lowercase__ : Any = tokenizer(
['''I am a small frog''' * 1_024, '''I am a small frog'''] ,padding=_snake_case ,truncation=_snake_case ,return_tensors='''pt''' )
self.assertIsInstance(_snake_case ,_snake_case )
self.assertEqual(batch.input_ids.shape ,(2, 1_024) )
@require_torch
def UpperCAmelCase ( self : Any ) -> Optional[int]:
"""simple docstring"""
lowercase__ : Optional[int] = ['''A long paragraph for summarization.''']
lowercase__ : List[str] = [
'''Summary of the text.''',
]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
lowercase__ : Tuple = tokenizer(_snake_case ,text_target=_snake_case ,return_tensors='''pt''' )
lowercase__ : Dict = inputs['''input_ids''']
lowercase__ : Dict = inputs['''labels''']
self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item() )
self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item() )
self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item() )
self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item() )
def UpperCAmelCase ( self : Tuple ) -> int:
"""simple docstring"""
pass
def UpperCAmelCase ( self : str ) -> List[str]:
"""simple docstring"""
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
lowercase__ : Optional[Any] = self.rust_tokenizer_class.from_pretrained(_snake_case ,**_snake_case )
lowercase__ : Union[str, Any] = self.tokenizer_class.from_pretrained(_snake_case ,**_snake_case )
lowercase__ : List[str] = '''A, <mask> AllenNLP sentence.'''
lowercase__ : Dict = tokenizer_r.encode_plus(_snake_case ,add_special_tokens=_snake_case ,return_token_type_ids=_snake_case )
lowercase__ : Any = tokenizer_p.encode_plus(_snake_case ,add_special_tokens=_snake_case ,return_token_type_ids=_snake_case )
# token_type_ids should put 0 everywhere
self.assertEqual(sum(tokens_r['''token_type_ids'''] ) ,sum(tokens_p['''token_type_ids'''] ) )
# attention_mask should put 1 everywhere, so sum over length should be 1
self.assertEqual(
sum(tokens_r['''attention_mask'''] ) / len(tokens_r['''attention_mask'''] ) ,sum(tokens_p['''attention_mask'''] ) / len(tokens_p['''attention_mask'''] ) ,)
lowercase__ : List[str] = tokenizer_r.convert_ids_to_tokens(tokens_r['''input_ids'''] )
lowercase__ : Tuple = tokenizer_p.convert_ids_to_tokens(tokens_p['''input_ids'''] )
# Rust correctly handles the space before the mask while python doesnt
self.assertSequenceEqual(tokens_p['''input_ids'''] ,[0, 250, 6, 50_264, 3_823, 487, 21_992, 3_645, 4, 2] )
self.assertSequenceEqual(tokens_r['''input_ids'''] ,[0, 250, 6, 50_264, 3_823, 487, 21_992, 3_645, 4, 2] )
self.assertSequenceEqual(
_snake_case ,['''<s>''', '''A''', ''',''', '''<mask>''', '''ĠAllen''', '''N''', '''LP''', '''Ġsentence''', '''.''', '''</s>'''] )
self.assertSequenceEqual(
_snake_case ,['''<s>''', '''A''', ''',''', '''<mask>''', '''ĠAllen''', '''N''', '''LP''', '''Ġsentence''', '''.''', '''</s>'''] )
| 560
|
"""simple docstring"""
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase_ = logging.get_logger(__name__)
class __A ( A_ ):
'''simple docstring'''
lowerCAmelCase : int = "encoder-decoder"
lowerCAmelCase : int = True
def __init__( self : Optional[int] ,**_snake_case : Tuple ) -> Tuple:
"""simple docstring"""
super().__init__(**_snake_case )
assert (
"encoder" in kwargs and "decoder" in kwargs
), "Config has to be initialized with encoder and decoder config"
lowercase__ : Tuple = kwargs.pop('''encoder''' )
lowercase__ : Dict = encoder_config.pop('''model_type''' )
lowercase__ : Union[str, Any] = kwargs.pop('''decoder''' )
lowercase__ : Any = decoder_config.pop('''model_type''' )
from ..auto.configuration_auto import AutoConfig
lowercase__ : Optional[int] = AutoConfig.for_model(_snake_case ,**_snake_case )
lowercase__ : Dict = AutoConfig.for_model(_snake_case ,**_snake_case )
lowercase__ : List[str] = True
@classmethod
def UpperCAmelCase ( cls : str ,_snake_case : PretrainedConfig ,_snake_case : PretrainedConfig ,**_snake_case : Optional[Any] ) -> PretrainedConfig:
"""simple docstring"""
logger.info('''Set `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config''' )
lowercase__ : Dict = True
lowercase__ : Any = True
return cls(encoder=encoder_config.to_dict() ,decoder=decoder_config.to_dict() ,**_snake_case )
def UpperCAmelCase ( self : Tuple ) -> Optional[int]:
"""simple docstring"""
lowercase__ : List[Any] = copy.deepcopy(self.__dict__ )
lowercase__ : List[Any] = self.encoder.to_dict()
lowercase__ : List[Any] = self.decoder.to_dict()
lowercase__ : Optional[int] = self.__class__.model_type
return output
| 560
| 1
|
import inspect
from typing import Callable, List, Optional, Union
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextModel,
CLIPTokenizer,
WhisperForConditionalGeneration,
WhisperProcessor,
)
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.utils import logging
__lowerCamelCase : List[str] = logging.get_logger(__name__) # pylint: disable=invalid-name
class _lowercase ( UpperCamelCase__ ):
def __init__( self , a , a , a , a , a , a , a , a , a , ):
super().__init__()
if safety_checker is None:
logger.warning(
F"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"
""" that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"""
""" results in services or applications open to the public. Both the diffusers team and Hugging Face"""
""" strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"""
""" it only for use-cases that involve analyzing network behavior or auditing its results. For more"""
""" information, please have a look at https://github.com/huggingface/diffusers/pull/254 .""" )
self.register_modules(
speech_model=_a , speech_processor=_a , vae=_a , text_encoder=_a , tokenizer=_a , unet=_a , scheduler=_a , feature_extractor=_a , )
def lowercase__ ( self , a = "auto" ):
if slice_size == "auto":
snake_case__ : Any =self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(_a )
def lowercase__ ( self ):
self.enable_attention_slicing(_a )
@torch.no_grad()
def __call__( self , a , a=1_6_0_0_0 , a = 5_1_2 , a = 5_1_2 , a = 5_0 , a = 7.5 , a = None , a = 1 , a = 0.0 , a = None , a = None , a = "pil" , a = True , a = None , a = 1 , **a , ):
snake_case__ : Dict =self.speech_processor.feature_extractor(
_a , return_tensors="""pt""" , sampling_rate=_a ).input_features.to(self.device )
snake_case__ : Optional[int] =self.speech_model.generate(_a , max_length=4_8_0_0_0_0 )
snake_case__ : Union[str, Any] =self.speech_processor.tokenizer.batch_decode(_a , skip_special_tokens=_a , normalize=_a )[
0
]
if isinstance(_a , _a ):
snake_case__ : Any =1
elif isinstance(_a , _a ):
snake_case__ : Optional[int] =len(_a )
else:
raise ValueError(F"`prompt` has to be of type `str` or `list` but is {type(_a )}" )
if height % 8 != 0 or width % 8 != 0:
raise ValueError(F"`height` and `width` have to be divisible by 8 but are {height} and {width}." )
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(_a , _a ) or callback_steps <= 0)
):
raise ValueError(
F"`callback_steps` has to be a positive integer but is {callback_steps} of type"
F" {type(_a )}." )
# get prompt text embeddings
snake_case__ : List[Any] =self.tokenizer(
_a , padding="""max_length""" , max_length=self.tokenizer.model_max_length , return_tensors="""pt""" , )
snake_case__ : Union[str, Any] =text_inputs.input_ids
if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
snake_case__ : List[str] =self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] )
logger.warning(
"""The following part of your input was truncated because CLIP can only handle sequences up to"""
F" {self.tokenizer.model_max_length} tokens: {removed_text}" )
snake_case__ : str =text_input_ids[:, : self.tokenizer.model_max_length]
snake_case__ : Optional[int] =self.text_encoder(text_input_ids.to(self.device ) )[0]
# duplicate text embeddings for each generation per prompt, using mps friendly method
snake_case__ : List[Any] =text_embeddings.shape
snake_case__ : Tuple =text_embeddings.repeat(1 , _a , 1 )
snake_case__ : Union[str, Any] =text_embeddings.view(bs_embed * num_images_per_prompt , _a , -1 )
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
snake_case__ : Any =guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
snake_case__ : List[str]
if negative_prompt is None:
snake_case__ : List[Any] =[""""""] * batch_size
elif type(_a ) is not type(_a ):
raise TypeError(
F"`negative_prompt` should be the same type to `prompt`, but got {type(_a )} !="
F" {type(_a )}." )
elif isinstance(_a , _a ):
snake_case__ : str =[negative_prompt]
elif batch_size != len(_a ):
raise ValueError(
F"`negative_prompt`: {negative_prompt} has batch size {len(_a )}, but `prompt`:"
F" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
""" the batch size of `prompt`.""" )
else:
snake_case__ : Optional[int] =negative_prompt
snake_case__ : Tuple =text_input_ids.shape[-1]
snake_case__ : Dict =self.tokenizer(
_a , padding="""max_length""" , max_length=_a , truncation=_a , return_tensors="""pt""" , )
snake_case__ : int =self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
snake_case__ : Optional[Any] =uncond_embeddings.shape[1]
snake_case__ : Dict =uncond_embeddings.repeat(1 , _a , 1 )
snake_case__ : str =uncond_embeddings.view(batch_size * num_images_per_prompt , _a , -1 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
snake_case__ : Union[str, Any] =torch.cat([uncond_embeddings, text_embeddings] )
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
snake_case__ : Union[str, Any] =(batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
snake_case__ : Optional[Any] =text_embeddings.dtype
if latents is None:
if self.device.type == "mps":
# randn does not exist on mps
snake_case__ : List[str] =torch.randn(_a , generator=_a , device="""cpu""" , dtype=_a ).to(
self.device )
else:
snake_case__ : Dict =torch.randn(_a , generator=_a , device=self.device , dtype=_a )
else:
if latents.shape != latents_shape:
raise ValueError(F"Unexpected latents shape, got {latents.shape}, expected {latents_shape}" )
snake_case__ : Any =latents.to(self.device )
# set timesteps
self.scheduler.set_timesteps(_a )
# Some schedulers like PNDM have timesteps as arrays
# It's more optimized to move all timesteps to correct device beforehand
snake_case__ : int =self.scheduler.timesteps.to(self.device )
# scale the initial noise by the standard deviation required by the scheduler
snake_case__ : Union[str, Any] =latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
snake_case__ : List[str] ="""eta""" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
snake_case__ : Optional[int] ={}
if accepts_eta:
snake_case__ : Dict =eta
for i, t in enumerate(self.progress_bar(_a ) ):
# expand the latents if we are doing classifier free guidance
snake_case__ : List[str] =torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
snake_case__ : Dict =self.scheduler.scale_model_input(_a , _a )
# predict the noise residual
snake_case__ : Optional[Any] =self.unet(_a , _a , encoder_hidden_states=_a ).sample
# perform guidance
if do_classifier_free_guidance:
snake_case__ : Dict =noise_pred.chunk(2 )
snake_case__ : Optional[Any] =noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# compute the previous noisy sample x_t -> x_t-1
snake_case__ : str =self.scheduler.step(_a , _a , _a , **_a ).prev_sample
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(_a , _a , _a )
snake_case__ : List[str] =1 / 0.18215 * latents
snake_case__ : List[str] =self.vae.decode(_a ).sample
snake_case__ : int =(image / 2 + 0.5).clamp(0 , 1 )
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
snake_case__ : int =image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
snake_case__ : Dict =self.numpy_to_pil(_a )
if not return_dict:
return image
return StableDiffusionPipelineOutput(images=_a , nsfw_content_detected=_a )
| 710
|
class _lowercase :
def __init__( self , a ):
snake_case__ : Optional[int] =size
snake_case__ : List[Any] =[0] * size
snake_case__ : List[Any] =[0] * size
@staticmethod
def lowercase__ ( a ):
return index | (index + 1)
@staticmethod
def lowercase__ ( a ):
return (index & (index + 1)) - 1
def lowercase__ ( self , a , a ):
snake_case__ : Optional[Any] =value
while index < self.size:
snake_case__ : str =self.get_prev(a ) + 1
if current_left_border == index:
snake_case__ : Dict =value
else:
snake_case__ : str =max(a , a , a )
snake_case__ : Dict =self.get_next(a )
def lowercase__ ( self , a , a ):
right -= 1 # Because of right is exclusive
snake_case__ : Union[str, Any] =0
while left <= right:
snake_case__ : List[str] =self.get_prev(a )
if left <= current_left:
snake_case__ : Optional[Any] =max(a , self.tree[right] )
snake_case__ : Optional[Any] =current_left
else:
snake_case__ : Tuple =max(a , self.arr[right] )
right -= 1
return result
if __name__ == "__main__":
import doctest
doctest.testmod()
| 448
| 0
|
'''simple docstring'''
def a__ ( lowerCAmelCase__ ) -> list:
for i in range(len(lowerCAmelCase__ ) - 1 , 0 , -1 ):
UpperCAmelCase__ : Optional[int] = False
for j in range(lowerCAmelCase__ , 0 , -1 ):
if unsorted[j] < unsorted[j - 1]:
UpperCAmelCase__ , UpperCAmelCase__ : Dict = unsorted[j - 1], unsorted[j]
UpperCAmelCase__ : List[str] = True
for j in range(lowerCAmelCase__ ):
if unsorted[j] > unsorted[j + 1]:
UpperCAmelCase__ , UpperCAmelCase__ : int = unsorted[j + 1], unsorted[j]
UpperCAmelCase__ : Dict = True
if not swapped:
break
return unsorted
if __name__ == "__main__":
import doctest
doctest.testmod()
UpperCamelCase__ = input('''Enter numbers separated by a comma:\n''').strip()
UpperCamelCase__ = [int(item) for item in user_input.split(''',''')]
print(F"""{cocktail_shaker_sort(unsorted) = }""")
| 75
|
'''simple docstring'''
import unittest
import numpy as np
import timeout_decorator # noqa
from transformers import BlenderbotConfig, is_flax_available
from transformers.testing_utils import jax_device, require_flax, slow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
UpperCamelCase__ = '''platform'''
import jax
import jax.numpy as jnp
from transformers import BlenderbotTokenizer
from transformers.models.blenderbot.modeling_flax_blenderbot import (
FlaxBlenderbotForConditionalGeneration,
FlaxBlenderbotModel,
shift_tokens_right,
)
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__=None , lowerCAmelCase__=None , lowerCAmelCase__=None , lowerCAmelCase__=None , lowerCAmelCase__=None , lowerCAmelCase__=None , ) -> Tuple:
if attention_mask is None:
UpperCAmelCase__ : List[Any] = np.where(input_ids != config.pad_token_id , 1 , 0 )
if decoder_attention_mask is None:
UpperCAmelCase__ : Union[str, Any] = np.where(decoder_input_ids != config.pad_token_id , 1 , 0 )
if head_mask is None:
UpperCAmelCase__ : Optional[Any] = np.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
UpperCAmelCase__ : Optional[int] = np.ones((config.decoder_layers, config.decoder_attention_heads) )
if cross_attn_head_mask is None:
UpperCAmelCase__ : Any = np.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": attention_mask,
}
class lowerCamelCase_ :
def __init__( self : Optional[Any] , _A : Optional[Any] , _A : str=13 , _A : int=7 , _A : Any=True , _A : List[Any]=False , _A : Optional[int]=99 , _A : Optional[int]=16 , _A : int=2 , _A : Optional[int]=4 , _A : Optional[int]=4 , _A : int="gelu" , _A : List[str]=0.1 , _A : str=0.1 , _A : int=32 , _A : Optional[int]=2 , _A : int=1 , _A : Dict=0 , _A : Dict=0.0_2 , ):
'''simple docstring'''
UpperCAmelCase__ : Union[str, Any] = parent
UpperCAmelCase__ : str = batch_size
UpperCAmelCase__ : Dict = seq_length
UpperCAmelCase__ : str = is_training
UpperCAmelCase__ : int = use_labels
UpperCAmelCase__ : Union[str, Any] = vocab_size
UpperCAmelCase__ : Union[str, Any] = hidden_size
UpperCAmelCase__ : int = num_hidden_layers
UpperCAmelCase__ : Any = num_attention_heads
UpperCAmelCase__ : List[str] = intermediate_size
UpperCAmelCase__ : str = hidden_act
UpperCAmelCase__ : str = hidden_dropout_prob
UpperCAmelCase__ : Union[str, Any] = attention_probs_dropout_prob
UpperCAmelCase__ : Union[str, Any] = max_position_embeddings
UpperCAmelCase__ : int = eos_token_id
UpperCAmelCase__ : Optional[int] = pad_token_id
UpperCAmelCase__ : List[str] = bos_token_id
UpperCAmelCase__ : Union[str, Any] = initializer_range
def lowercase_ ( self : Any ):
'''simple docstring'''
UpperCAmelCase__ : Any = np.clip(ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ) , 3 , self.vocab_size )
UpperCAmelCase__ : str = np.concatenate((input_ids, 2 * np.ones((self.batch_size, 1) , dtype=np.intaa )) , -1 )
UpperCAmelCase__ : List[Any] = shift_tokens_right(_A , 1 , 2 )
UpperCAmelCase__ : List[Any] = BlenderbotConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , initializer_range=self.initializer_range , use_cache=_A , )
UpperCAmelCase__ : Tuple = prepare_blenderbot_inputs_dict(_A , _A , _A )
return config, inputs_dict
def lowercase_ ( self : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase__ , UpperCAmelCase__ : List[Any] = self.prepare_config_and_inputs()
return config, inputs_dict
def lowercase_ ( self : int , _A : List[Any] , _A : Optional[Any] , _A : int ):
'''simple docstring'''
UpperCAmelCase__ : List[str] = 20
UpperCAmelCase__ : int = model_class_name(_A )
UpperCAmelCase__ : str = model.encode(inputs_dict['''input_ids'''] )
UpperCAmelCase__ , UpperCAmelCase__ : Dict = (
inputs_dict['''decoder_input_ids'''],
inputs_dict['''decoder_attention_mask'''],
)
UpperCAmelCase__ : Tuple = model.init_cache(decoder_input_ids.shape[0] , _A , _A )
UpperCAmelCase__ : Optional[Any] = jnp.ones((decoder_input_ids.shape[0], max_decoder_length) , dtype='''i4''' )
UpperCAmelCase__ : str = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
UpperCAmelCase__ : str = model.decode(
decoder_input_ids[:, :-1] , _A , decoder_attention_mask=_A , past_key_values=_A , decoder_position_ids=_A , )
UpperCAmelCase__ : Dict = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype='''i4''' )
UpperCAmelCase__ : Tuple = model.decode(
decoder_input_ids[:, -1:] , _A , decoder_attention_mask=_A , past_key_values=outputs_cache.past_key_values , decoder_position_ids=_A , )
UpperCAmelCase__ : int = model.decode(_A , _A )
UpperCAmelCase__ : Dict = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1e-3 , msg=f"""Max diff is {diff}""" )
def lowercase_ ( self : Tuple , _A : List[Any] , _A : Tuple , _A : Tuple ):
'''simple docstring'''
UpperCAmelCase__ : Tuple = 20
UpperCAmelCase__ : Optional[int] = model_class_name(_A )
UpperCAmelCase__ : Optional[int] = model.encode(inputs_dict['''input_ids'''] )
UpperCAmelCase__ , UpperCAmelCase__ : Optional[int] = (
inputs_dict['''decoder_input_ids'''],
inputs_dict['''decoder_attention_mask'''],
)
UpperCAmelCase__ : Any = jnp.concatenate(
[
decoder_attention_mask,
jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1]) ),
] , axis=-1 , )
UpperCAmelCase__ : List[Any] = model.init_cache(decoder_input_ids.shape[0] , _A , _A )
UpperCAmelCase__ : Optional[Any] = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
UpperCAmelCase__ : int = model.decode(
decoder_input_ids[:, :-1] , _A , decoder_attention_mask=_A , past_key_values=_A , decoder_position_ids=_A , )
UpperCAmelCase__ : List[Any] = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype='''i4''' )
UpperCAmelCase__ : Any = model.decode(
decoder_input_ids[:, -1:] , _A , past_key_values=outputs_cache.past_key_values , decoder_attention_mask=_A , decoder_position_ids=_A , )
UpperCAmelCase__ : List[str] = model.decode(_A , _A , decoder_attention_mask=_A )
UpperCAmelCase__ : str = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1e-3 , msg=f"""Max diff is {diff}""" )
@require_flax
class lowerCamelCase_ ( unittest.TestCase ):
lowerCAmelCase__ = 9_9
def lowercase_ ( self : Optional[int] ):
'''simple docstring'''
UpperCAmelCase__ : List[str] = np.array(
[
[71, 82, 18, 33, 46, 91, 2],
[68, 34, 26, 58, 30, 82, 2],
[5, 97, 17, 39, 94, 40, 2],
[76, 83, 94, 25, 70, 78, 2],
[87, 59, 41, 35, 48, 66, 2],
[55, 13, 16, 58, 5, 2, 1], # note padding
[64, 27, 31, 51, 12, 75, 2],
[52, 64, 86, 17, 83, 39, 2],
[48, 61, 9, 24, 71, 82, 2],
[26, 1, 60, 48, 22, 13, 2],
[21, 5, 62, 28, 14, 76, 2],
[45, 98, 37, 86, 59, 48, 2],
[70, 70, 50, 9, 28, 0, 2],
] , dtype=np.intaa , )
UpperCAmelCase__ : int = input_ids.shape[0]
UpperCAmelCase__ : List[str] = BlenderbotConfig(
vocab_size=self.vocab_size , d_model=24 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=32 , decoder_ffn_dim=32 , max_position_embeddings=48 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , )
return config, input_ids, batch_size
def lowercase_ ( self : List[Any] ):
'''simple docstring'''
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : Union[str, Any] = self._get_config_and_data()
UpperCAmelCase__ : Any = FlaxBlenderbotForConditionalGeneration(_A )
UpperCAmelCase__ : Optional[int] = lm_model(input_ids=_A )
UpperCAmelCase__ : Dict = (batch_size, input_ids.shape[1], config.vocab_size)
self.assertEqual(outputs['''logits'''].shape , _A )
def lowercase_ ( self : int ):
'''simple docstring'''
UpperCAmelCase__ : List[str] = BlenderbotConfig(
vocab_size=self.vocab_size , d_model=14 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=8 , decoder_ffn_dim=8 , max_position_embeddings=48 , )
UpperCAmelCase__ : Union[str, Any] = FlaxBlenderbotForConditionalGeneration(_A )
UpperCAmelCase__ : str = np.array([[71, 82, 18, 33, 46, 91, 2], [68, 34, 26, 58, 30, 2, 1]] , dtype=np.intaa )
UpperCAmelCase__ : Any = np.array([[82, 71, 82, 18, 2], [58, 68, 2, 1, 1]] , dtype=np.intaa )
UpperCAmelCase__ : Tuple = lm_model(input_ids=_A , decoder_input_ids=_A )
UpperCAmelCase__ : int = (*summary.shape, config.vocab_size)
self.assertEqual(outputs['''logits'''].shape , _A )
def lowercase_ ( self : Dict ):
'''simple docstring'''
UpperCAmelCase__ : Any = np.array([[71, 82, 18, 33, 2, 1, 1], [68, 34, 26, 58, 30, 82, 2]] , dtype=np.intaa )
UpperCAmelCase__ : Union[str, Any] = shift_tokens_right(_A , 1 , 2 )
UpperCAmelCase__ : str = np.equal(_A , 1 ).astype(np.floataa ).sum()
UpperCAmelCase__ : Dict = np.equal(_A , 1 ).astype(np.floataa ).sum()
self.assertEqual(shifted.shape , input_ids.shape )
self.assertEqual(_A , n_pad_before - 1 )
self.assertTrue(np.equal(shifted[:, 0] , 2 ).all() )
@require_flax
class lowerCamelCase_ ( __a , unittest.TestCase , __a ):
lowerCAmelCase__ = True
lowerCAmelCase__ = (
(
FlaxBlenderbotModel,
FlaxBlenderbotForConditionalGeneration,
)
if is_flax_available()
else ()
)
lowerCAmelCase__ = (FlaxBlenderbotForConditionalGeneration,) if is_flax_available() else ()
def lowercase_ ( self : Tuple ):
'''simple docstring'''
UpperCAmelCase__ : Dict = FlaxBlenderbotModelTester(self )
def lowercase_ ( self : List[Any] ):
'''simple docstring'''
UpperCAmelCase__ , UpperCAmelCase__ : List[str] = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward(_A , _A , _A )
def lowercase_ ( self : List[Any] ):
'''simple docstring'''
UpperCAmelCase__ , UpperCAmelCase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward_with_attn_mask(_A , _A , _A )
def lowercase_ ( self : Optional[int] ):
'''simple docstring'''
UpperCAmelCase__ , UpperCAmelCase__ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
UpperCAmelCase__ : Dict = self._prepare_for_class(_A , _A )
UpperCAmelCase__ : str = model_class(_A )
@jax.jit
def encode_jitted(_A : Any , _A : Tuple=None , **_A : Optional[int] ):
return model.encode(input_ids=_A , attention_mask=_A )
with self.subTest('''JIT Enabled''' ):
UpperCAmelCase__ : Optional[Any] = encode_jitted(**_A ).to_tuple()
with self.subTest('''JIT Disabled''' ):
with jax.disable_jit():
UpperCAmelCase__ : Tuple = encode_jitted(**_A ).to_tuple()
self.assertEqual(len(_A ) , len(_A ) )
for jitted_output, output in zip(_A , _A ):
self.assertEqual(jitted_output.shape , output.shape )
def lowercase_ ( self : Tuple ):
'''simple docstring'''
UpperCAmelCase__ , UpperCAmelCase__ : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
UpperCAmelCase__ : List[str] = model_class(_A )
UpperCAmelCase__ : Tuple = model.encode(inputs_dict['''input_ids'''] , inputs_dict['''attention_mask'''] )
UpperCAmelCase__ : Tuple = {
'''decoder_input_ids''': inputs_dict['''decoder_input_ids'''],
'''decoder_attention_mask''': inputs_dict['''decoder_attention_mask'''],
'''encoder_outputs''': encoder_outputs,
}
@jax.jit
def decode_jitted(_A : Optional[int] , _A : List[Any] , _A : int ):
return model.decode(
decoder_input_ids=_A , decoder_attention_mask=_A , encoder_outputs=_A , )
with self.subTest('''JIT Enabled''' ):
UpperCAmelCase__ : Any = decode_jitted(**_A ).to_tuple()
with self.subTest('''JIT Disabled''' ):
with jax.disable_jit():
UpperCAmelCase__ : Optional[int] = decode_jitted(**_A ).to_tuple()
self.assertEqual(len(_A ) , len(_A ) )
for jitted_output, output in zip(_A , _A ):
self.assertEqual(jitted_output.shape , output.shape )
@slow
def lowercase_ ( self : List[str] ):
'''simple docstring'''
for model_class_name in self.all_model_classes:
UpperCAmelCase__ : Union[str, Any] = model_class_name.from_pretrained('''facebook/blenderbot-400M-distill''' )
# FlaxBlenderbotForSequenceClassification expects eos token in input_ids
UpperCAmelCase__ : Tuple = np.ones((1, 1) ) * model.config.eos_token_id
UpperCAmelCase__ : Union[str, Any] = model(_A )
self.assertIsNotNone(_A )
@unittest.skipUnless(jax_device != '''cpu''' , '''3B test too slow on CPU.''' )
@slow
def lowercase_ ( self : Dict ):
'''simple docstring'''
UpperCAmelCase__ : Union[str, Any] = {'''num_beams''': 1, '''early_stopping''': True, '''min_length''': 15, '''max_length''': 25}
UpperCAmelCase__ : int = {'''skip_special_tokens''': True, '''clean_up_tokenization_spaces''': True}
UpperCAmelCase__ : str = FlaxBlenderbotForConditionalGeneration.from_pretrained('''facebook/blenderbot-3B''' , from_pt=_A )
UpperCAmelCase__ : Optional[Any] = BlenderbotTokenizer.from_pretrained('''facebook/blenderbot-3B''' )
UpperCAmelCase__ : Optional[Any] = ['''Sam''']
UpperCAmelCase__ : Dict = tokenizer(_A , return_tensors='''jax''' )
UpperCAmelCase__ : List[str] = model.generate(**_A , **_A )
UpperCAmelCase__ : Dict = '''Sam is a great name. It means "sun" in Gaelic.'''
UpperCAmelCase__ : Any = tokenizer.batch_decode(_A , **_A )
assert generated_txt[0].strip() == tgt_text
| 75
| 1
|
'''simple docstring'''
# A Bipartite Graph is a graph whose vertices can be divided into two independent sets,
# U and V such that every edge (u, v) either connects a vertex from U to V or a vertex
# from V to U. In other words, for every edge (u, v), either u belongs to U and v to V,
# or u belongs to V and v to U. We can also say that there is no edge that connects
# vertices of same set.
def lowerCamelCase_ ( lowercase__):
lowerCamelCase__ = [False] * len(lowercase__)
lowerCamelCase__ = [-1] * len(lowercase__)
def dfs(lowercase__ , lowercase__):
lowerCamelCase__ = True
lowerCamelCase__ = c
for u in graph[v]:
if not visited[u]:
dfs(lowercase__ , 1 - c)
for i in range(len(lowercase__)):
if not visited[i]:
dfs(lowercase__ , 0)
for i in range(len(lowercase__)):
for j in graph[i]:
if color[i] == color[j]:
return False
return True
# Adjacency list of graph
__A : Optional[Any] = {0: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2], 4: []}
print(check_bipartite_dfs(graph))
| 187
|
'''simple docstring'''
import tensorflow as tf
from ...tf_utils import shape_list
class lowercase ( tf.keras.layers.Layer ):
'''simple docstring'''
def __init__( self : Union[str, Any] , __lowerCamelCase : str , __lowerCamelCase : int , __lowerCamelCase : Dict , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : int=1 , __lowerCamelCase : Tuple=False , **__lowerCamelCase : Dict ) -> str:
'''simple docstring'''
super().__init__(**__lowerCamelCase )
lowerCamelCase__ = vocab_size
lowerCamelCase__ = d_embed
lowerCamelCase__ = d_proj
lowerCamelCase__ = cutoffs + [vocab_size]
lowerCamelCase__ = [0] + self.cutoffs
lowerCamelCase__ = div_val
lowerCamelCase__ = self.cutoffs[0]
lowerCamelCase__ = len(self.cutoffs ) - 1
lowerCamelCase__ = self.shortlist_size + self.n_clusters
lowerCamelCase__ = keep_order
lowerCamelCase__ = []
lowerCamelCase__ = []
def a__ ( self : Optional[int] , __lowerCamelCase : str ) -> List[str]:
'''simple docstring'''
if self.n_clusters > 0:
lowerCamelCase__ = self.add_weight(
shape=(self.n_clusters, self.d_embed) , initializer="zeros" , trainable=__lowerCamelCase , name="cluster_weight" )
lowerCamelCase__ = self.add_weight(
shape=(self.n_clusters,) , initializer="zeros" , trainable=__lowerCamelCase , name="cluster_bias" )
if self.div_val == 1:
for i in range(len(self.cutoffs ) ):
if self.d_proj != self.d_embed:
lowerCamelCase__ = self.add_weight(
shape=(self.d_embed, self.d_proj) , initializer="zeros" , trainable=__lowerCamelCase , name=f'''out_projs_._{i}''' , )
self.out_projs.append(__lowerCamelCase )
else:
self.out_projs.append(__lowerCamelCase )
lowerCamelCase__ = self.add_weight(
shape=(self.vocab_size, self.d_embed) , initializer="zeros" , trainable=__lowerCamelCase , name=f'''out_layers_._{i}_._weight''' , )
lowerCamelCase__ = self.add_weight(
shape=(self.vocab_size,) , initializer="zeros" , trainable=__lowerCamelCase , name=f'''out_layers_._{i}_._bias''' , )
self.out_layers.append((weight, bias) )
else:
for i in range(len(self.cutoffs ) ):
lowerCamelCase__ , lowerCamelCase__ = self.cutoff_ends[i], self.cutoff_ends[i + 1]
lowerCamelCase__ = self.d_embed // (self.div_val**i)
lowerCamelCase__ = self.add_weight(
shape=(d_emb_i, self.d_proj) , initializer="zeros" , trainable=__lowerCamelCase , name=f'''out_projs_._{i}''' )
self.out_projs.append(__lowerCamelCase )
lowerCamelCase__ = self.add_weight(
shape=(r_idx - l_idx, d_emb_i) , initializer="zeros" , trainable=__lowerCamelCase , name=f'''out_layers_._{i}_._weight''' , )
lowerCamelCase__ = self.add_weight(
shape=(r_idx - l_idx,) , initializer="zeros" , trainable=__lowerCamelCase , name=f'''out_layers_._{i}_._bias''' , )
self.out_layers.append((weight, bias) )
super().build(__lowerCamelCase )
@staticmethod
def a__ ( __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Optional[Any] , __lowerCamelCase : Tuple , __lowerCamelCase : List[str]=None ) -> str:
'''simple docstring'''
lowerCamelCase__ = x
if proj is not None:
lowerCamelCase__ = tf.einsum("ibd,ed->ibe" , __lowerCamelCase , __lowerCamelCase )
return tf.einsum("ibd,nd->ibn" , __lowerCamelCase , __lowerCamelCase ) + b
@staticmethod
def a__ ( __lowerCamelCase : List[str] , __lowerCamelCase : Union[str, Any] ) -> Union[str, Any]:
'''simple docstring'''
lowerCamelCase__ = shape_list(__lowerCamelCase )
lowerCamelCase__ = tf.range(lp_size[0] , dtype=target.dtype )
lowerCamelCase__ = tf.stack([r, target] , 1 )
return tf.gather_nd(__lowerCamelCase , __lowerCamelCase )
def a__ ( self : Any , __lowerCamelCase : int , __lowerCamelCase : Tuple , __lowerCamelCase : str=True , __lowerCamelCase : Tuple=False ) -> int:
'''simple docstring'''
lowerCamelCase__ = 0
if self.n_clusters == 0:
lowerCamelCase__ = self._logit(__lowerCamelCase , self.out_layers[0][0] , self.out_layers[0][1] , self.out_projs[0] )
if target is not None:
lowerCamelCase__ = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=__lowerCamelCase , logits=__lowerCamelCase )
lowerCamelCase__ = tf.nn.log_softmax(__lowerCamelCase , axis=-1 )
else:
lowerCamelCase__ = shape_list(__lowerCamelCase )
lowerCamelCase__ = []
lowerCamelCase__ = tf.zeros(hidden_sizes[:2] )
for i in range(len(self.cutoffs ) ):
lowerCamelCase__ , lowerCamelCase__ = self.cutoff_ends[i], self.cutoff_ends[i + 1]
if target is not None:
lowerCamelCase__ = (target >= l_idx) & (target < r_idx)
lowerCamelCase__ = tf.where(__lowerCamelCase )
lowerCamelCase__ = tf.boolean_mask(__lowerCamelCase , __lowerCamelCase ) - l_idx
if self.div_val == 1:
lowerCamelCase__ = self.out_layers[0][0][l_idx:r_idx]
lowerCamelCase__ = self.out_layers[0][1][l_idx:r_idx]
else:
lowerCamelCase__ = self.out_layers[i][0]
lowerCamelCase__ = self.out_layers[i][1]
if i == 0:
lowerCamelCase__ = tf.concat([cur_W, self.cluster_weight] , 0 )
lowerCamelCase__ = tf.concat([cur_b, self.cluster_bias] , 0 )
lowerCamelCase__ = self._logit(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , self.out_projs[0] )
lowerCamelCase__ = tf.nn.log_softmax(__lowerCamelCase )
out.append(head_logprob[..., : self.cutoffs[0]] )
if target is not None:
lowerCamelCase__ = tf.boolean_mask(__lowerCamelCase , __lowerCamelCase )
lowerCamelCase__ = self._gather_logprob(__lowerCamelCase , __lowerCamelCase )
else:
lowerCamelCase__ = self._logit(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , self.out_projs[i] )
lowerCamelCase__ = tf.nn.log_softmax(__lowerCamelCase )
lowerCamelCase__ = self.cutoffs[0] + i - 1 # No probability for the head cluster
lowerCamelCase__ = head_logprob[..., cluster_prob_idx, None] + tail_logprob
out.append(__lowerCamelCase )
if target is not None:
lowerCamelCase__ = tf.boolean_mask(__lowerCamelCase , __lowerCamelCase )
lowerCamelCase__ = tf.boolean_mask(__lowerCamelCase , __lowerCamelCase )
lowerCamelCase__ = self._gather_logprob(__lowerCamelCase , __lowerCamelCase )
cur_logprob += cur_head_logprob[:, self.cutoff_ends[1] + i - 1]
if target is not None:
loss += tf.scatter_nd(__lowerCamelCase , -cur_logprob , shape_list(__lowerCamelCase ) )
lowerCamelCase__ = tf.concat(__lowerCamelCase , axis=-1 )
if target is not None:
if return_mean:
lowerCamelCase__ = tf.reduce_mean(__lowerCamelCase )
# Add the training-time loss value to the layer using `self.add_loss()`.
self.add_loss(__lowerCamelCase )
# Log the loss as a metric (we could log arbitrary metrics,
# including different metrics for training and inference.
self.add_metric(__lowerCamelCase , name=self.name , aggregation="mean" if return_mean else "" )
return out
| 187
| 1
|
"""simple docstring"""
import numpy
class _lowerCamelCase :
def __init__( self : Union[str, Any] , UpperCamelCase : numpy.ndarray , UpperCamelCase : numpy.ndarray ) -> Tuple:
"""simple docstring"""
lowerCAmelCase__ : Optional[int] = input_array
# Random initial weights are assigned where first argument is the
# number of nodes in previous layer and second argument is the
# number of nodes in the next layer.
# Random initial weights are assigned.
# self.input_array.shape[1] is used to represent number of nodes in input layer.
# First hidden layer consists of 4 nodes.
lowerCAmelCase__ : Union[str, Any] = numpy.random.rand(
self.input_array.shape[1] , 4 )
# Random initial values for the first hidden layer.
# First hidden layer has 4 nodes.
# Second hidden layer has 3 nodes.
lowerCAmelCase__ : List[str] = numpy.random.rand(
4 , 3 )
# Random initial values for the second hidden layer.
# Second hidden layer has 3 nodes.
# Output layer has 1 node.
lowerCAmelCase__ : List[str] = numpy.random.rand(3 , 1 )
# Real output values provided.
lowerCAmelCase__ : Tuple = output_array
# Predicted output values by the neural network.
# Predicted_output array initially consists of zeroes.
lowerCAmelCase__ : Union[str, Any] = numpy.zeros(output_array.shape )
def _lowerCAmelCase ( self : Union[str, Any] ) -> Dict:
"""simple docstring"""
lowerCAmelCase__ : Dict = sigmoid(
numpy.dot(self.input_array , self.input_layer_and_first_hidden_layer_weights ) )
# layer_between_first_hidden_layer_and_second_hidden_layer is the layer
# connecting the first hidden set of nodes with the second hidden set of nodes.
lowerCAmelCase__ : Optional[Any] = sigmoid(
numpy.dot(
self.layer_between_input_and_first_hidden_layer , self.first_hidden_layer_and_second_hidden_layer_weights , ) )
# layer_between_second_hidden_layer_and_output is the layer connecting
# second hidden layer with the output node.
lowerCAmelCase__ : Any = sigmoid(
numpy.dot(
self.layer_between_first_hidden_layer_and_second_hidden_layer , self.second_hidden_layer_and_output_layer_weights , ) )
return self.layer_between_second_hidden_layer_and_output
def _lowerCAmelCase ( self : int ) -> Union[str, Any]:
"""simple docstring"""
lowerCAmelCase__ : Tuple = numpy.dot(
self.layer_between_first_hidden_layer_and_second_hidden_layer.T , 2
* (self.output_array - self.predicted_output)
* sigmoid_derivative(self.predicted_output ) , )
lowerCAmelCase__ : List[Any] = numpy.dot(
self.layer_between_input_and_first_hidden_layer.T , numpy.dot(
2
* (self.output_array - self.predicted_output)
* sigmoid_derivative(self.predicted_output ) , self.second_hidden_layer_and_output_layer_weights.T , )
* sigmoid_derivative(
self.layer_between_first_hidden_layer_and_second_hidden_layer ) , )
lowerCAmelCase__ : Union[str, Any] = numpy.dot(
self.input_array.T , numpy.dot(
numpy.dot(
2
* (self.output_array - self.predicted_output)
* sigmoid_derivative(self.predicted_output ) , self.second_hidden_layer_and_output_layer_weights.T , )
* sigmoid_derivative(
self.layer_between_first_hidden_layer_and_second_hidden_layer ) , self.first_hidden_layer_and_second_hidden_layer_weights.T , )
* sigmoid_derivative(self.layer_between_input_and_first_hidden_layer ) , )
self.input_layer_and_first_hidden_layer_weights += (
updated_input_layer_and_first_hidden_layer_weights
)
self.first_hidden_layer_and_second_hidden_layer_weights += (
updated_first_hidden_layer_and_second_hidden_layer_weights
)
self.second_hidden_layer_and_output_layer_weights += (
updated_second_hidden_layer_and_output_layer_weights
)
def _lowerCAmelCase ( self : Optional[int] , UpperCamelCase : numpy.ndarray , UpperCamelCase : int , UpperCamelCase : bool ) -> Optional[int]:
"""simple docstring"""
for iteration in range(1 , iterations + 1 ):
lowerCAmelCase__ : str = self.feedforward()
self.back_propagation()
if give_loss:
lowerCAmelCase__ : str = numpy.mean(numpy.square(output - self.feedforward() ) )
print(f"""Iteration {iteration} Loss: {loss}""" )
def _lowerCAmelCase ( self : List[str] , UpperCamelCase : numpy.ndarray ) -> Optional[Any]:
"""simple docstring"""
lowerCAmelCase__ : List[Any] = input_arr
lowerCAmelCase__ : Dict = sigmoid(
numpy.dot(self.array , self.input_layer_and_first_hidden_layer_weights ) )
lowerCAmelCase__ : Union[str, Any] = sigmoid(
numpy.dot(
self.layer_between_input_and_first_hidden_layer , self.first_hidden_layer_and_second_hidden_layer_weights , ) )
lowerCAmelCase__ : List[Any] = sigmoid(
numpy.dot(
self.layer_between_first_hidden_layer_and_second_hidden_layer , self.second_hidden_layer_and_output_layer_weights , ) )
return int(self.layer_between_second_hidden_layer_and_output > 0.6 )
def lowercase_ ( __UpperCAmelCase ) -> numpy.ndarray:
return 1 / (1 + numpy.exp(-value ))
def lowercase_ ( __UpperCAmelCase ) -> numpy.ndarray:
return (value) * (1 - (value))
def lowercase_ ( ) -> int:
lowerCAmelCase__ : List[str] = numpy.array(
(
[0, 0, 0],
[0, 0, 1],
[0, 1, 0],
[0, 1, 1],
[1, 0, 0],
[1, 0, 1],
[1, 1, 0],
[1, 1, 1],
) , dtype=numpy.floataa , )
# True output values for the given input values.
lowerCAmelCase__ : List[str] = numpy.array(([0], [1], [1], [0], [1], [0], [0], [1]) , dtype=numpy.floataa )
# Calling neural network class.
lowerCAmelCase__ : Any = TwoHiddenLayerNeuralNetwork(
input_array=__UpperCAmelCase , output_array=__UpperCAmelCase )
# Calling training function.
# Set give_loss to True if you want to see loss in every iteration.
neural_network.train(output=__UpperCAmelCase , iterations=10 , give_loss=__UpperCAmelCase )
return neural_network.predict(numpy.array(([1, 1, 1]) , dtype=numpy.floataa ) )
if __name__ == "__main__":
example()
| 299
|
import collections
import os
import re
from pathlib import Path
lowerCamelCase_ : Optional[Any] = """src/transformers"""
# Matches is_xxx_available()
lowerCamelCase_ : Union[str, Any] = re.compile(r"""is\_([a-z_]*)_available()""")
# Catches a one-line _import_struct = {xxx}
lowerCamelCase_ : int = re.compile(r"""^_import_structure\s+=\s+\{([^\}]+)\}""")
# Catches a line with a key-values pattern: "bla": ["foo", "bar"]
lowerCamelCase_ : Union[str, Any] = re.compile(r"""\s+\"\S*\":\s+\[([^\]]*)\]""")
# Catches a line if not is_foo_available
lowerCamelCase_ : Any = re.compile(r"""^\s*if\s+not\s+is\_[a-z_]*\_available\(\)""")
# Catches a line _import_struct["bla"].append("foo")
lowerCamelCase_ : Any = re.compile(r"""^\s*_import_structure\[\"\S*\"\]\.append\(\"(\S*)\"\)""")
# Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
lowerCamelCase_ : List[Any] = re.compile(r"""^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]""")
# Catches a line with an object between quotes and a comma: "MyModel",
lowerCamelCase_ : Any = re.compile(r"""^\s+\"([^\"]+)\",""")
# Catches a line with objects between brackets only: ["foo", "bar"],
lowerCamelCase_ : Tuple = re.compile(r"""^\s+\[([^\]]+)\]""")
# Catches a line with from foo import bar, bla, boo
lowerCamelCase_ : Tuple = re.compile(r"""\s+from\s+\S*\s+import\s+([^\(\s].*)\n""")
# Catches a line with try:
lowerCamelCase_ : Dict = re.compile(r"""^\s*try:""")
# Catches a line with else:
lowerCamelCase_ : Union[str, Any] = re.compile(r"""^\s*else:""")
def A__ ( lowerCamelCase ) -> List[Any]:
if _re_test_backend.search(lowerCamelCase ) is None:
return None
UpperCamelCase_: Any = [b[0] for b in _re_backend.findall(lowerCamelCase )]
backends.sort()
return "_and_".join(lowerCamelCase )
def A__ ( lowerCamelCase ) -> Union[str, Any]:
with open(lowerCamelCase , """r""" , encoding="""utf-8""" , newline="""\n""" ) as f:
UpperCamelCase_: Dict = f.readlines()
UpperCamelCase_: Tuple = 0
while line_index < len(lowerCamelCase ) and not lines[line_index].startswith("""_import_structure = {""" ):
line_index += 1
# If this is a traditional init, just return.
if line_index >= len(lowerCamelCase ):
return None
# First grab the objects without a specific backend in _import_structure
UpperCamelCase_: Optional[int] = []
while not lines[line_index].startswith("""if TYPE_CHECKING""" ) and find_backend(lines[line_index] ) is None:
UpperCamelCase_: Any = lines[line_index]
# If we have everything on a single line, let's deal with it.
if _re_one_line_import_struct.search(lowerCamelCase ):
UpperCamelCase_: str = _re_one_line_import_struct.search(lowerCamelCase ).groups()[0]
UpperCamelCase_: Tuple = re.findall(r"""\[([^\]]+)\]""" , lowerCamelCase )
for imp in imports:
objects.extend([obj[1:-1] for obj in imp.split(""", """ )] )
line_index += 1
continue
UpperCamelCase_: Any = _re_import_struct_key_value.search(lowerCamelCase )
if single_line_import_search is not None:
UpperCamelCase_: Any = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(""", """ ) if len(lowerCamelCase ) > 0]
objects.extend(lowerCamelCase )
elif line.startswith(""" """ * 8 + """\"""" ):
objects.append(line[9:-3] )
line_index += 1
UpperCamelCase_: Optional[Any] = {"""none""": objects}
# Let's continue with backend-specific objects in _import_structure
while not lines[line_index].startswith("""if TYPE_CHECKING""" ):
# If the line is an if not is_backend_available, we grab all objects associated.
UpperCamelCase_: Any = find_backend(lines[line_index] )
# Check if the backend declaration is inside a try block:
if _re_try.search(lines[line_index - 1] ) is None:
UpperCamelCase_: Any = None
if backend is not None:
line_index += 1
# Scroll until we hit the else block of try-except-else
while _re_else.search(lines[line_index] ) is None:
line_index += 1
line_index += 1
UpperCamelCase_: int = []
# Until we unindent, add backend objects to the list
while len(lines[line_index] ) <= 1 or lines[line_index].startswith(""" """ * 4 ):
UpperCamelCase_: Tuple = lines[line_index]
if _re_import_struct_add_one.search(lowerCamelCase ) is not None:
objects.append(_re_import_struct_add_one.search(lowerCamelCase ).groups()[0] )
elif _re_import_struct_add_many.search(lowerCamelCase ) is not None:
UpperCamelCase_: List[str] = _re_import_struct_add_many.search(lowerCamelCase ).groups()[0].split(""", """ )
UpperCamelCase_: str = [obj[1:-1] for obj in imports if len(lowerCamelCase ) > 0]
objects.extend(lowerCamelCase )
elif _re_between_brackets.search(lowerCamelCase ) is not None:
UpperCamelCase_: Tuple = _re_between_brackets.search(lowerCamelCase ).groups()[0].split(""", """ )
UpperCamelCase_: List[Any] = [obj[1:-1] for obj in imports if len(lowerCamelCase ) > 0]
objects.extend(lowerCamelCase )
elif _re_quote_object.search(lowerCamelCase ) is not None:
objects.append(_re_quote_object.search(lowerCamelCase ).groups()[0] )
elif line.startswith(""" """ * 8 + """\"""" ):
objects.append(line[9:-3] )
elif line.startswith(""" """ * 12 + """\"""" ):
objects.append(line[13:-3] )
line_index += 1
UpperCamelCase_: List[str] = objects
else:
line_index += 1
# At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend
UpperCamelCase_: List[str] = []
while (
line_index < len(lowerCamelCase )
and find_backend(lines[line_index] ) is None
and not lines[line_index].startswith("""else""" )
):
UpperCamelCase_: List[str] = lines[line_index]
UpperCamelCase_: Union[str, Any] = _re_import.search(lowerCamelCase )
if single_line_import_search is not None:
objects.extend(single_line_import_search.groups()[0].split(""", """ ) )
elif line.startswith(""" """ * 8 ):
objects.append(line[8:-2] )
line_index += 1
UpperCamelCase_: Any = {"""none""": objects}
# Let's continue with backend-specific objects
while line_index < len(lowerCamelCase ):
# If the line is an if is_backend_available, we grab all objects associated.
UpperCamelCase_: str = find_backend(lines[line_index] )
# Check if the backend declaration is inside a try block:
if _re_try.search(lines[line_index - 1] ) is None:
UpperCamelCase_: List[Any] = None
if backend is not None:
line_index += 1
# Scroll until we hit the else block of try-except-else
while _re_else.search(lines[line_index] ) is None:
line_index += 1
line_index += 1
UpperCamelCase_: int = []
# Until we unindent, add backend objects to the list
while len(lines[line_index] ) <= 1 or lines[line_index].startswith(""" """ * 8 ):
UpperCamelCase_: Tuple = lines[line_index]
UpperCamelCase_: Union[str, Any] = _re_import.search(lowerCamelCase )
if single_line_import_search is not None:
objects.extend(single_line_import_search.groups()[0].split(""", """ ) )
elif line.startswith(""" """ * 12 ):
objects.append(line[12:-2] )
line_index += 1
UpperCamelCase_: Optional[Any] = objects
else:
line_index += 1
return import_dict_objects, type_hint_objects
def A__ ( lowerCamelCase , lowerCamelCase ) -> List[Any]:
def find_duplicates(lowerCamelCase ):
return [k for k, v in collections.Counter(lowerCamelCase ).items() if v > 1]
if list(import_dict_objects.keys() ) != list(type_hint_objects.keys() ):
return ["Both sides of the init do not have the same backends!"]
UpperCamelCase_: Optional[int] = []
for key in import_dict_objects.keys():
UpperCamelCase_: List[str] = find_duplicates(import_dict_objects[key] )
if duplicate_imports:
errors.append(F'''Duplicate _import_structure definitions for: {duplicate_imports}''' )
UpperCamelCase_: Tuple = find_duplicates(type_hint_objects[key] )
if duplicate_type_hints:
errors.append(F'''Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}''' )
if sorted(set(import_dict_objects[key] ) ) != sorted(set(type_hint_objects[key] ) ):
UpperCamelCase_: int = """base imports""" if key == """none""" else F'''{key} backend'''
errors.append(F'''Differences for {name}:''' )
for a in type_hint_objects[key]:
if a not in import_dict_objects[key]:
errors.append(F''' {a} in TYPE_HINT but not in _import_structure.''' )
for a in import_dict_objects[key]:
if a not in type_hint_objects[key]:
errors.append(F''' {a} in _import_structure but not in TYPE_HINT.''' )
return errors
def A__ ( ) -> Tuple:
UpperCamelCase_: Optional[int] = []
for root, _, files in os.walk(lowerCamelCase ):
if "__init__.py" in files:
UpperCamelCase_: Union[str, Any] = os.path.join(lowerCamelCase , """__init__.py""" )
UpperCamelCase_: Optional[int] = parse_init(lowerCamelCase )
if objects is not None:
UpperCamelCase_: Any = analyze_results(*lowerCamelCase )
if len(lowerCamelCase ) > 0:
UpperCamelCase_: Any = F'''Problem in {fname}, both halves do not define the same objects.\n{errors[0]}'''
failures.append("""\n""".join(lowerCamelCase ) )
if len(lowerCamelCase ) > 0:
raise ValueError("""\n\n""".join(lowerCamelCase ) )
def A__ ( ) -> Any:
UpperCamelCase_: List[Any] = []
for path, directories, files in os.walk(lowerCamelCase ):
for folder in directories:
# Ignore private modules
if folder.startswith("""_""" ):
directories.remove(lowerCamelCase )
continue
# Ignore leftovers from branches (empty folders apart from pycache)
if len(list((Path(lowerCamelCase ) / folder).glob("""*.py""" ) ) ) == 0:
continue
UpperCamelCase_: str = str((Path(lowerCamelCase ) / folder).relative_to(lowerCamelCase ) )
UpperCamelCase_: Optional[Any] = short_path.replace(os.path.sep , """.""" )
submodules.append(lowerCamelCase )
for fname in files:
if fname == "__init__.py":
continue
UpperCamelCase_: Dict = str((Path(lowerCamelCase ) / fname).relative_to(lowerCamelCase ) )
UpperCamelCase_: Optional[int] = short_path.replace(""".py""" , """""" ).replace(os.path.sep , """.""" )
if len(submodule.split(""".""" ) ) == 1:
submodules.append(lowerCamelCase )
return submodules
lowerCamelCase_ : Optional[int] = [
"""convert_pytorch_checkpoint_to_tf2""",
"""modeling_flax_pytorch_utils""",
"""models.esm.openfold_utils""",
]
def A__ ( ) -> List[str]:
# This is to make sure the transformers module imported is the one in the repo.
from transformers.utils import direct_transformers_import
UpperCamelCase_: Optional[Any] = direct_transformers_import(lowerCamelCase )
UpperCamelCase_: Tuple = set(transformers._import_structure.keys() )
# This contains all the base keys of the _import_structure object defined in the init, but if the user is missing
# some optional dependencies, they may not have all of them. Thus we read the init to read all additions and
# (potentiall re-) add them.
with open(os.path.join(lowerCamelCase , """__init__.py""" ) , """r""" ) as f:
UpperCamelCase_: List[Any] = f.read()
import_structure_keys.update(set(re.findall(r"""import_structure\[\"([^\"]*)\"\]""" , lowerCamelCase ) ) )
UpperCamelCase_: Dict = [
module
for module in get_transformers_submodules()
if module not in IGNORE_SUBMODULES and module not in import_structure_keys
]
if len(lowerCamelCase ) > 0:
UpperCamelCase_: Dict = """\n""".join(F'''- {module}''' for module in module_not_registered )
raise ValueError(
"""The following submodules are not properly registed in the main init of Transformers:\n"""
F'''{list_of_modules}\n'''
"""Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value.""" )
if __name__ == "__main__":
check_all_inits()
check_submodules()
| 548
| 0
|
'''simple docstring'''
from io import BytesIO
from typing import List, Union
import requests
from ..utils import add_end_docstrings, is_decord_available, is_torch_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_decord_available():
import numpy as np
from decord import VideoReader
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING
__a = logging.get_logger(__name__)
@add_end_docstrings(_a )
class UpperCAmelCase_ ( _a ):
"""simple docstring"""
def __init__( self : List[Any] , *snake_case_ : List[str] , **snake_case_ : Tuple ):
super().__init__(*snake_case_ , **snake_case_ )
requires_backends(self , """decord""" )
self.check_model_type(snake_case_ )
def lowerCamelCase ( self : Optional[int] , snake_case_ : Optional[Any]=None , snake_case_ : Optional[Any]=None , snake_case_ : Optional[int]=None ):
snake_case__ : Optional[Any] = {}
if frame_sampling_rate is not None:
snake_case__ : str = frame_sampling_rate
if num_frames is not None:
snake_case__ : str = num_frames
snake_case__ : List[Any] = {}
if top_k is not None:
snake_case__ : str = top_k
return preprocess_params, {}, postprocess_params
def __call__( self : Tuple , snake_case_ : Union[str, List[str]] , **snake_case_ : Tuple ):
return super().__call__(snake_case_ , **snake_case_ )
def lowerCamelCase ( self : Optional[int] , snake_case_ : Union[str, Any] , snake_case_ : Tuple=None , snake_case_ : Optional[int]=1 ):
if num_frames is None:
snake_case__ : List[Any] = self.model.config.num_frames
if video.startswith("""http://""" ) or video.startswith("""https://""" ):
snake_case__ : Any = BytesIO(requests.get(snake_case_ ).content )
snake_case__ : Optional[int] = VideoReader(snake_case_ )
videoreader.seek(0 )
snake_case__ : Optional[Any] = 0
snake_case__ : Optional[int] = num_frames * frame_sampling_rate - 1
snake_case__ : Any = np.linspace(snake_case_ , snake_case_ , num=snake_case_ , dtype=np.intaa )
snake_case__ : List[str] = videoreader.get_batch(snake_case_ ).asnumpy()
snake_case__ : Union[str, Any] = list(snake_case_ )
snake_case__ : Any = self.image_processor(snake_case_ , return_tensors=self.framework )
return model_inputs
def lowerCamelCase ( self : Optional[Any] , snake_case_ : Union[str, Any] ):
snake_case__ : Any = self.model(**snake_case_ )
return model_outputs
def lowerCamelCase ( self : str , snake_case_ : Dict , snake_case_ : List[Any]=5 ):
if top_k > self.model.config.num_labels:
snake_case__ : List[Any] = self.model.config.num_labels
if self.framework == "pt":
snake_case__ : str = model_outputs.logits.softmax(-1 )[0]
snake_case__ : int = probs.topk(snake_case_ )
else:
raise ValueError(f"Unsupported framework: {self.framework}" )
snake_case__ : List[Any] = scores.tolist()
snake_case__ : List[str] = ids.tolist()
return [{"score": score, "label": self.model.config.idalabel[_id]} for score, _id in zip(snake_case_ , snake_case_ )]
| 719
|
'''simple docstring'''
import json
import logging
import os
import socket
import git
import numpy as np
import torch
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - PID: %(process)d - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
level=logging.INFO,
)
__a = logging.getLogger(__name__)
def __snake_case( _lowerCAmelCase ) -> str:
snake_case__ : Dict = git.Repo(search_parent_directories=_lowerCAmelCase )
snake_case__ : Optional[int] = {
"""repo_id""": str(_lowerCAmelCase ),
"""repo_sha""": str(repo.head.object.hexsha ),
"""repo_branch""": str(repo.active_branch ),
}
with open(os.path.join(_lowerCAmelCase , """git_log.json""" ) , """w""" ) as f:
json.dump(_lowerCAmelCase , _lowerCAmelCase , indent=4 )
def __snake_case( _lowerCAmelCase ) -> Tuple:
if params.n_gpu <= 0:
snake_case__ : Optional[int] = 0
snake_case__ : Optional[Any] = -1
snake_case__ : Union[str, Any] = True
snake_case__ : Dict = False
return
assert torch.cuda.is_available()
logger.info("""Initializing GPUs""" )
if params.n_gpu > 1:
assert params.local_rank != -1
snake_case__ : Tuple = int(os.environ["""WORLD_SIZE"""] )
snake_case__ : str = int(os.environ["""N_GPU_NODE"""] )
snake_case__ : Dict = int(os.environ["""RANK"""] )
# number of nodes / node ID
snake_case__ : List[str] = params.world_size // params.n_gpu_per_node
snake_case__ : str = params.global_rank // params.n_gpu_per_node
snake_case__ : Optional[Any] = True
assert params.n_nodes == int(os.environ["""N_NODES"""] )
assert params.node_id == int(os.environ["""NODE_RANK"""] )
# local job (single GPU)
else:
assert params.local_rank == -1
snake_case__ : int = 1
snake_case__ : str = 0
snake_case__ : Optional[int] = 0
snake_case__ : Tuple = 0
snake_case__ : Any = 1
snake_case__ : List[Any] = 1
snake_case__ : Union[str, Any] = False
# sanity checks
assert params.n_nodes >= 1
assert 0 <= params.node_id < params.n_nodes
assert 0 <= params.local_rank <= params.global_rank < params.world_size
assert params.world_size == params.n_nodes * params.n_gpu_per_node
# define whether this is the master process / if we are in multi-node distributed mode
snake_case__ : Optional[Any] = params.node_id == 0 and params.local_rank == 0
snake_case__ : Optional[Any] = params.n_nodes > 1
# summary
snake_case__ : List[Any] = f"--- Global rank: {params.global_rank} - "
logger.info(PREFIX + """Number of nodes: %i""" % params.n_nodes )
logger.info(PREFIX + """Node ID : %i""" % params.node_id )
logger.info(PREFIX + """Local rank : %i""" % params.local_rank )
logger.info(PREFIX + """World size : %i""" % params.world_size )
logger.info(PREFIX + """GPUs per node : %i""" % params.n_gpu_per_node )
logger.info(PREFIX + """Master : %s""" % str(params.is_master ) )
logger.info(PREFIX + """Multi-node : %s""" % str(params.multi_node ) )
logger.info(PREFIX + """Multi-GPU : %s""" % str(params.multi_gpu ) )
logger.info(PREFIX + """Hostname : %s""" % socket.gethostname() )
# set GPU device
torch.cuda.set_device(params.local_rank )
# initialize multi-GPU
if params.multi_gpu:
logger.info("""Initializing PyTorch distributed""" )
torch.distributed.init_process_group(
init_method="""env://""" , backend="""nccl""" , )
def __snake_case( _lowerCAmelCase ) -> List[str]:
np.random.seed(args.seed )
torch.manual_seed(args.seed )
if args.n_gpu > 0:
torch.cuda.manual_seed_all(args.seed )
| 301
| 0
|
"""simple docstring"""
from typing import Optional
from urllib.parse import quote
import huggingface_hub as hfh
from packaging import version
def snake_case ( A__ ,A__ ,A__ = None ):
if version.parse(hfh.__version__ ).release < version.parse("0.11.0" ).release:
# old versions of hfh don't url-encode the file path
UpperCAmelCase_ : List[str] = quote(A__ )
return hfh.hf_hub_url(A__ ,A__ ,repo_type="dataset" ,revision=A__ )
| 95
|
from torch import nn
def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: str ):
if act_fn in ["swish", "silu"]:
return nn.SiLU()
elif act_fn == "mish":
return nn.Mish()
elif act_fn == "gelu":
return nn.GELU()
else:
raise ValueError(f'''Unsupported activation function: {act_fn}''' )
| 6
| 0
|
'''simple docstring'''
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import numpy as np
from utils_multiple_choice import MultipleChoiceDataset, Split, processors
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
A__: Optional[int] = logging.getLogger(__name__)
def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : Any ,_UpperCAmelCase : int ) -> Dict:
return (preds == labels).mean()
@dataclass
class A__ :
__UpperCamelCase : str = field(
metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} )
__UpperCamelCase : Optional[str] = field(
default=UpperCAmelCase__ , metadata={"help": "Pretrained config name or path if not the same as model_name"} )
__UpperCamelCase : Optional[str] = field(
default=UpperCAmelCase__ , metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} )
__UpperCamelCase : Optional[str] = field(
default=UpperCAmelCase__ , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , )
@dataclass
class A__ :
__UpperCamelCase : str = field(metadata={"help": "The name of the task to train on: " + ", ".join(processors.keys() )} )
__UpperCamelCase : str = field(metadata={"help": "Should contain the data files for the task."} )
__UpperCamelCase : int = field(
default=128 , metadata={
"help": (
"The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
)
} , )
__UpperCamelCase : bool = field(
default=UpperCAmelCase__ , metadata={"help": "Overwrite the cached training and evaluation sets"} )
def SCREAMING_SNAKE_CASE_ ( ) -> Union[str, Any]:
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
_a : Optional[int] =HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
_a : Optional[int] =parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
F"Output directory ({training_args.output_dir}) already exists and is not empty. Use"
""" --overwrite_output_dir to overcome.""" )
# Setup logging
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" ,datefmt="""%m/%d/%Y %H:%M:%S""" ,level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN ,)
logger.warning(
"""Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s""" ,training_args.local_rank ,training_args.device ,training_args.n_gpu ,bool(training_args.local_rank != -1 ) ,training_args.fpaa ,)
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info("""Training/evaluation parameters %s""" ,_UpperCAmelCase )
# Set seed
set_seed(training_args.seed )
try:
_a : Optional[int] =processors[data_args.task_name]()
_a : int =processor.get_labels()
_a : Any =len(_UpperCAmelCase )
except KeyError:
raise ValueError("""Task not found: %s""" % (data_args.task_name) )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
_a : List[Any] =AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path ,num_labels=_UpperCAmelCase ,finetuning_task=data_args.task_name ,cache_dir=model_args.cache_dir ,)
_a : List[Any] =AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path ,cache_dir=model_args.cache_dir ,)
_a : Any =AutoModelForMultipleChoice.from_pretrained(
model_args.model_name_or_path ,from_tf=bool(""".ckpt""" in model_args.model_name_or_path ) ,config=_UpperCAmelCase ,cache_dir=model_args.cache_dir ,)
# Get datasets
_a : int =(
MultipleChoiceDataset(
data_dir=data_args.data_dir ,tokenizer=_UpperCAmelCase ,task=data_args.task_name ,max_seq_length=data_args.max_seq_length ,overwrite_cache=data_args.overwrite_cache ,mode=Split.train ,)
if training_args.do_train
else None
)
_a : Union[str, Any] =(
MultipleChoiceDataset(
data_dir=data_args.data_dir ,tokenizer=_UpperCAmelCase ,task=data_args.task_name ,max_seq_length=data_args.max_seq_length ,overwrite_cache=data_args.overwrite_cache ,mode=Split.dev ,)
if training_args.do_eval
else None
)
def compute_metrics(_UpperCAmelCase : EvalPrediction ) -> Dict:
_a : Dict =np.argmax(p.predictions ,axis=1 )
return {"acc": simple_accuracy(_UpperCAmelCase ,p.label_ids )}
# Data collator
_a : int =DataCollatorWithPadding(_UpperCAmelCase ,pad_to_multiple_of=8 ) if training_args.fpaa else None
# Initialize our Trainer
_a : Dict =Trainer(
model=_UpperCAmelCase ,args=_UpperCAmelCase ,train_dataset=_UpperCAmelCase ,eval_dataset=_UpperCAmelCase ,compute_metrics=_UpperCAmelCase ,data_collator=_UpperCAmelCase ,)
# Training
if training_args.do_train:
trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_master():
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
_a : str ={}
if training_args.do_eval:
logger.info("""*** Evaluate ***""" )
_a : List[str] =trainer.evaluate()
_a : str =os.path.join(training_args.output_dir ,"""eval_results.txt""" )
if trainer.is_world_master():
with open(_UpperCAmelCase ,"""w""" ) as writer:
logger.info("""***** Eval results *****""" )
for key, value in result.items():
logger.info(""" %s = %s""" ,_UpperCAmelCase ,_UpperCAmelCase )
writer.write("""%s = %s\n""" % (key, value) )
results.update(_UpperCAmelCase )
return results
def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : Dict ) -> int:
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
| 705
|
'''simple docstring'''
import json
import os
import tempfile
import datasets
from utils import generate_example_dataset, get_duration
A__: Dict = 5_0000
A__: Optional[int] = 5000
A__ , A__: Optional[int] = os.path.split(__file__)
A__: Union[str, Any] = os.path.join(RESULTS_BASEPATH, '''results''', RESULTS_FILENAME.replace('''.py''', '''.json'''))
@get_duration
def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : datasets.Dataset ,_UpperCAmelCase : Union[str, Any] ) -> List[str]:
for i in range(_UpperCAmelCase ):
_a : Union[str, Any] =dataset[i]
@get_duration
def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : datasets.Dataset ,_UpperCAmelCase : str ,_UpperCAmelCase : List[Any] ) -> str:
for i in range(0 ,len(_UpperCAmelCase ) ,_UpperCAmelCase ):
_a : int =dataset[i : i + batch_size]
@get_duration
def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : datasets.Dataset ,_UpperCAmelCase : str ,_UpperCAmelCase : Any ) -> Optional[int]:
with dataset.formatted_as(type=_UpperCAmelCase ):
for i in range(_UpperCAmelCase ):
_a : List[str] =dataset[i]
@get_duration
def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : datasets.Dataset ,_UpperCAmelCase : Union[str, Any] ,_UpperCAmelCase : int ,_UpperCAmelCase : List[Any] ) -> int:
with dataset.formatted_as(type=_UpperCAmelCase ):
for i in range(0 ,_UpperCAmelCase ,_UpperCAmelCase ):
_a : Optional[int] =dataset[i : i + batch_size]
def SCREAMING_SNAKE_CASE_ ( ) -> Union[str, Any]:
_a : Optional[int] ={"""num examples""": SPEED_TEST_N_EXAMPLES}
_a : Tuple =[
(read, {"""length""": SMALL_TEST}),
(read, {"""length""": SPEED_TEST_N_EXAMPLES}),
(read_batch, {"""length""": SPEED_TEST_N_EXAMPLES, """batch_size""": 10}),
(read_batch, {"""length""": SPEED_TEST_N_EXAMPLES, """batch_size""": 100}),
(read_batch, {"""length""": SPEED_TEST_N_EXAMPLES, """batch_size""": 1000}),
(read_formatted, {"""type""": """numpy""", """length""": SMALL_TEST}),
(read_formatted, {"""type""": """pandas""", """length""": SMALL_TEST}),
(read_formatted, {"""type""": """torch""", """length""": SMALL_TEST}),
(read_formatted, {"""type""": """tensorflow""", """length""": SMALL_TEST}),
(read_formatted_batch, {"""type""": """numpy""", """length""": SMALL_TEST, """batch_size""": 10}),
(read_formatted_batch, {"""type""": """numpy""", """length""": SMALL_TEST, """batch_size""": 1000}),
]
_a : Union[str, Any] =[
(read, {"""length""": SMALL_TEST}),
(read, {"""length""": SPEED_TEST_N_EXAMPLES}),
(read_batch, {"""length""": SPEED_TEST_N_EXAMPLES, """batch_size""": 10}),
(read_batch, {"""length""": SPEED_TEST_N_EXAMPLES, """batch_size""": 100}),
(read_batch, {"""length""": SPEED_TEST_N_EXAMPLES, """batch_size""": 1000}),
(read_formatted, {"""type""": """numpy""", """length""": SMALL_TEST}),
(read_formatted_batch, {"""type""": """numpy""", """length""": SMALL_TEST, """batch_size""": 10}),
(read_formatted_batch, {"""type""": """numpy""", """length""": SMALL_TEST, """batch_size""": 1000}),
]
with tempfile.TemporaryDirectory() as tmp_dir:
print("""generating dataset""" )
_a : List[str] =datasets.Features(
{"""list""": datasets.Sequence(datasets.Value("""float32""" ) ), """numbers""": datasets.Value("""float32""" )} )
_a : Optional[Any] =generate_example_dataset(
os.path.join(_UpperCAmelCase ,"""dataset.arrow""" ) ,_UpperCAmelCase ,num_examples=_UpperCAmelCase ,seq_shapes={"""list""": (100,)} ,)
print("""first set of iterations""" )
for func, kwargs in functions:
print(func.__name__ ,str(_UpperCAmelCase ) )
_a : int =func(_UpperCAmelCase ,**_UpperCAmelCase )
print("""shuffling dataset""" )
_a : int =dataset.shuffle()
print("""Second set of iterations (after shuffling""" )
for func, kwargs in functions_shuffled:
print("""shuffled """ ,func.__name__ ,str(_UpperCAmelCase ) )
_a : Union[str, Any] =func(
_UpperCAmelCase ,**_UpperCAmelCase )
with open(_UpperCAmelCase ,"""wb""" ) as f:
f.write(json.dumps(_UpperCAmelCase ).encode("""utf-8""" ) )
if __name__ == "__main__": # useful to run the profiler
benchmark_iterating()
| 506
| 0
|
# This script creates a super tiny model that is useful inside tests, when we just want to test that
# the machinery works, without needing to the check the quality of the outcomes.
#
# This version creates a tiny model through reduction of a normal pre-trained model, but keeping the
# full vocab, merges file, and thus also resulting in a larger model due to a large vocab size.
# This gives ~3MB in total for all files.
#
# If you want a 50 times smaller than this see `fsmt-make-super-tiny-model.py`, which is slightly more complicated
#
#
# It will be used then as "stas/tiny-wmt19-en-de"
# Build
from transformers import FSMTTokenizer, FSMTConfig, FSMTForConditionalGeneration
A = 'facebook/wmt19-en-de'
A = FSMTTokenizer.from_pretrained(mname)
# get the correct vocab sizes, etc. from the master model
A = FSMTConfig.from_pretrained(mname)
config.update(
dict(
d_model=4,
encoder_layers=1,
decoder_layers=1,
encoder_ffn_dim=4,
decoder_ffn_dim=4,
encoder_attention_heads=1,
decoder_attention_heads=1,
)
)
A = FSMTForConditionalGeneration(config)
print(f"num of params {tiny_model.num_parameters()}")
# Test
A = tokenizer(["Making tiny model"], return_tensors="pt")
A = tiny_model(**batch)
print("test output:", len(outputs.logits[0]))
# Save
A = 'tiny-wmt19-en-de'
tiny_model.half() # makes it smaller
tiny_model.save_pretrained(mname_tiny)
tokenizer.save_pretrained(mname_tiny)
print(f"Generated {mname_tiny}")
# Upload
# transformers-cli upload tiny-wmt19-en-de
| 475
|
import doctest
from collections import deque
import numpy as np
class __lowercase :
"""simple docstring"""
def __init__( self ) -> None:
snake_case : Any = [2, 1, 2, -1]
snake_case : int = [1, 2, 3, 4]
def UpperCAmelCase ( self ) -> list[float]:
snake_case : int = len(self.first_signal )
snake_case : Union[str, Any] = len(self.second_signal )
snake_case : Dict = max(A , A )
# create a zero matrix of max_length x max_length
snake_case : Optional[int] = [[0] * max_length for i in range(A )]
# fills the smaller signal with zeros to make both signals of same length
if length_first_signal < length_second_signal:
self.first_signal += [0] * (max_length - length_first_signal)
elif length_first_signal > length_second_signal:
self.second_signal += [0] * (max_length - length_second_signal)
for i in range(A ):
snake_case : Union[str, Any] = deque(self.second_signal )
rotated_signal.rotate(A )
for j, item in enumerate(A ):
matrix[i][j] += item
# multiply the matrix with the first signal
snake_case : Any = np.matmul(np.transpose(A ) , np.transpose(self.first_signal ) )
# rounding-off to two decimal places
return [round(A , 2 ) for i in final_signal]
if __name__ == "__main__":
doctest.testmod()
| 587
| 0
|
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging
A_ :Any = logging.get_logger(__name__)
A_ :Tuple = {'''vocab_file''': '''spiece.model'''}
A_ :str = {
'''vocab_file''': {
'''xlnet-base-cased''': '''https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model''',
'''xlnet-large-cased''': '''https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model''',
}
}
A_ :Tuple = {
'''xlnet-base-cased''': None,
'''xlnet-large-cased''': None,
}
# Segments (not really needed)
A_ :Optional[int] = 0
A_ :Any = 1
A_ :List[str] = 2
A_ :Optional[int] = 3
A_ :Optional[int] = 4
class __A ( a ):
"""simple docstring"""
UpperCamelCase__ : Tuple =VOCAB_FILES_NAMES
UpperCamelCase__ : List[str] =PRETRAINED_VOCAB_FILES_MAP
UpperCamelCase__ : Tuple =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCamelCase__ : Union[str, Any] ="""left"""
def __init__( self , lowerCamelCase__ , lowerCamelCase__=False , lowerCamelCase__=True , lowerCamelCase__=False , lowerCamelCase__="<s>" , lowerCamelCase__="</s>" , lowerCamelCase__="<unk>" , lowerCamelCase__="<sep>" , lowerCamelCase__="<pad>" , lowerCamelCase__="<cls>" , lowerCamelCase__="<mask>" , lowerCamelCase__=["<eop>", "<eod>"] , lowerCamelCase__ = None , **lowerCamelCase__ , ):
"""simple docstring"""
__UpperCamelCase : Optional[Any] =AddedToken(lowerCamelCase__ , lstrip=lowerCamelCase__ , rstrip=lowerCamelCase__ ) if isinstance(lowerCamelCase__ , lowerCamelCase__ ) else mask_token
__UpperCamelCase : Union[str, Any] ={} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
do_lower_case=lowerCamelCase__ , remove_space=lowerCamelCase__ , keep_accents=lowerCamelCase__ , bos_token=lowerCamelCase__ , eos_token=lowerCamelCase__ , unk_token=lowerCamelCase__ , sep_token=lowerCamelCase__ , pad_token=lowerCamelCase__ , cls_token=lowerCamelCase__ , mask_token=lowerCamelCase__ , additional_special_tokens=lowerCamelCase__ , sp_model_kwargs=self.sp_model_kwargs , **lowerCamelCase__ , )
__UpperCamelCase : Optional[Any] =3
__UpperCamelCase : int =do_lower_case
__UpperCamelCase : str =remove_space
__UpperCamelCase : int =keep_accents
__UpperCamelCase : Optional[int] =vocab_file
__UpperCamelCase : Dict =spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(lowerCamelCase__ )
@property
def __lowercase ( self ):
"""simple docstring"""
return len(self.sp_model )
def __lowercase ( self ):
"""simple docstring"""
__UpperCamelCase : Any ={self.convert_ids_to_tokens(lowerCamelCase__ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self ):
"""simple docstring"""
__UpperCamelCase : Tuple =self.__dict__.copy()
__UpperCamelCase : Optional[int] =None
return state
def __setstate__( self , lowerCamelCase__ ):
"""simple docstring"""
__UpperCamelCase : Any =d
# for backward compatibility
if not hasattr(self , 'sp_model_kwargs' ):
__UpperCamelCase : int ={}
__UpperCamelCase : Optional[int] =spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def __lowercase ( self , lowerCamelCase__ ):
"""simple docstring"""
if self.remove_space:
__UpperCamelCase : Dict =' '.join(inputs.strip().split() )
else:
__UpperCamelCase : Optional[Any] =inputs
__UpperCamelCase : Union[str, Any] =outputs.replace('``' , '"' ).replace('\'\'' , '"' )
if not self.keep_accents:
__UpperCamelCase : str =unicodedata.normalize('NFKD' , lowerCamelCase__ )
__UpperCamelCase : Union[str, Any] =''.join([c for c in outputs if not unicodedata.combining(lowerCamelCase__ )] )
if self.do_lower_case:
__UpperCamelCase : Tuple =outputs.lower()
return outputs
def __lowercase ( self , lowerCamelCase__ ):
"""simple docstring"""
__UpperCamelCase : Union[str, Any] =self.preprocess_text(lowerCamelCase__ )
__UpperCamelCase : Optional[Any] =self.sp_model.encode(lowerCamelCase__ , out_type=lowerCamelCase__ )
__UpperCamelCase : Dict =[]
for piece in pieces:
if len(lowerCamelCase__ ) > 1 and piece[-1] == str(',' ) and piece[-2].isdigit():
__UpperCamelCase : Union[str, Any] =self.sp_model.EncodeAsPieces(piece[:-1].replace(lowerCamelCase__ , '' ) )
if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
if len(cur_pieces[0] ) == 1:
__UpperCamelCase : str =cur_pieces[1:]
else:
__UpperCamelCase : List[Any] =cur_pieces[0][1:]
cur_pieces.append(piece[-1] )
new_pieces.extend(lowerCamelCase__ )
else:
new_pieces.append(lowerCamelCase__ )
return new_pieces
def __lowercase ( self , lowerCamelCase__ ):
"""simple docstring"""
return self.sp_model.PieceToId(lowerCamelCase__ )
def __lowercase ( self , lowerCamelCase__ ):
"""simple docstring"""
return self.sp_model.IdToPiece(lowerCamelCase__ )
def __lowercase ( self , lowerCamelCase__ ):
"""simple docstring"""
__UpperCamelCase : Any =''.join(lowerCamelCase__ ).replace(lowerCamelCase__ , ' ' ).strip()
return out_string
def __lowercase ( self , lowerCamelCase__ , lowerCamelCase__ = False , lowerCamelCase__ = None , lowerCamelCase__ = True , **lowerCamelCase__ , ):
"""simple docstring"""
__UpperCamelCase : Union[str, Any] =kwargs.pop('use_source_tokenizer' , lowerCamelCase__ )
__UpperCamelCase : Optional[int] =self.convert_ids_to_tokens(lowerCamelCase__ , skip_special_tokens=lowerCamelCase__ )
# To avoid mixing byte-level and unicode for byte-level BPT
# we need to build string separately for added tokens and byte-level tokens
# cf. https://github.com/huggingface/transformers/issues/1133
__UpperCamelCase : Tuple =[]
__UpperCamelCase : Union[str, Any] =[]
for token in filtered_tokens:
if skip_special_tokens and token in self.all_special_ids:
continue
if token in self.added_tokens_encoder:
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(lowerCamelCase__ ) )
__UpperCamelCase : Union[str, Any] =[]
sub_texts.append(lowerCamelCase__ )
else:
current_sub_text.append(lowerCamelCase__ )
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(lowerCamelCase__ ) )
# Mimic the behavior of the Rust tokenizer:
# By default, there are no spaces between special tokens
__UpperCamelCase : Dict =''.join(lowerCamelCase__ )
__UpperCamelCase : Optional[Any] =(
clean_up_tokenization_spaces
if clean_up_tokenization_spaces is not None
else self.clean_up_tokenization_spaces
)
if clean_up_tokenization_spaces:
__UpperCamelCase : Optional[int] =self.clean_up_tokenization(lowerCamelCase__ )
return clean_text
else:
return text
def __lowercase ( self , lowerCamelCase__ , lowerCamelCase__ = None ):
"""simple docstring"""
__UpperCamelCase : List[str] =[self.sep_token_id]
__UpperCamelCase : str =[self.cls_token_id]
if token_ids_a is None:
return token_ids_a + sep + cls
return token_ids_a + sep + token_ids_a + sep + cls
def __lowercase ( self , lowerCamelCase__ , lowerCamelCase__ = None , lowerCamelCase__ = False ):
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowerCamelCase__ , token_ids_a=lowerCamelCase__ , already_has_special_tokens=lowerCamelCase__ )
if token_ids_a is not None:
return ([0] * len(lowerCamelCase__ )) + [1] + ([0] * len(lowerCamelCase__ )) + [1, 1]
return ([0] * len(lowerCamelCase__ )) + [1, 1]
def __lowercase ( self , lowerCamelCase__ , lowerCamelCase__ = None ):
"""simple docstring"""
__UpperCamelCase : Union[str, Any] =[self.sep_token_id]
__UpperCamelCase : Union[str, Any] =[2]
if token_ids_a is None:
return len(token_ids_a + sep ) * [0] + cls_segment_id
return len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] + cls_segment_id
def __lowercase ( self , lowerCamelCase__ , lowerCamelCase__ = None ):
"""simple docstring"""
if not os.path.isdir(lowerCamelCase__ ):
logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
return
__UpperCamelCase : Optional[int] =os.path.join(
lowerCamelCase__ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(lowerCamelCase__ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , lowerCamelCase__ )
elif not os.path.isfile(self.vocab_file ):
with open(lowerCamelCase__ , 'wb' ) as fi:
__UpperCamelCase : Optional[int] =self.sp_model.serialized_model_proto()
fi.write(lowerCamelCase__ )
return (out_vocab_file,)
| 154
|
from __future__ import annotations
import csv
import requests
from bsa import BeautifulSoup
def A ( a_ = "" ) -> dict[str, float]:
__UpperCamelCase : Tuple =url or 'https://www.imdb.com/chart/top/?ref_=nv_mv_250'
__UpperCamelCase : Optional[int] =BeautifulSoup(requests.get(a_ ).text ,'html.parser' )
__UpperCamelCase : Union[str, Any] =soup.find_all('td' ,attrs='titleColumn' )
__UpperCamelCase : Any =soup.find_all('td' ,class_='ratingColumn imdbRating' )
return {
title.a.text: float(rating.strong.text )
for title, rating in zip(a_ ,a_ )
}
def A ( a_ = "IMDb_Top_250_Movies.csv" ) -> None:
__UpperCamelCase : Dict =get_imdb_top_aaa_movies()
with open(a_ ,'w' ,newline='' ) as out_file:
__UpperCamelCase : Any =csv.writer(a_ )
writer.writerow(['Movie title', 'IMDb rating'] )
for title, rating in movies.items():
writer.writerow([title, rating] )
if __name__ == "__main__":
write_movies()
| 154
| 1
|
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
import torch
from ..models.auto import AutoModelForVisualQuestionAnswering, AutoProcessor
from ..utils import requires_backends
from .base import PipelineTool
if TYPE_CHECKING:
from PIL import Image
class _UpperCamelCase ( A ):
'''simple docstring'''
a_ : Optional[Any] = "dandelin/vilt-b32-finetuned-vqa"
a_ : List[str] = (
"This is a tool that answers a question about an image. It takes an input named `image` which should be the "
"image containing the information, as well as a `question` which should be the question in English. It "
"returns a text that is the answer to the question."
)
a_ : Tuple = "image_qa"
a_ : Optional[int] = AutoProcessor
a_ : Any = AutoModelForVisualQuestionAnswering
a_ : List[str] = ["image", "text"]
a_ : str = ["text"]
def __init__( self : List[str] , *_lowerCamelCase : Union[str, Any] , **_lowerCamelCase : str ):
'''simple docstring'''
requires_backends(self , ["""vision"""] )
super().__init__(*_lowerCamelCase , **_lowerCamelCase )
def _snake_case ( self : Tuple , _lowerCamelCase : "Image" , _lowerCamelCase : str ):
'''simple docstring'''
return self.pre_processor(_lowerCamelCase , _lowerCamelCase , return_tensors="""pt""" )
def _snake_case ( self : List[Any] , _lowerCamelCase : str ):
'''simple docstring'''
with torch.no_grad():
return self.model(**_lowerCamelCase ).logits
def _snake_case ( self : Tuple , _lowerCamelCase : Any ):
'''simple docstring'''
__lowerCamelCase : Optional[Any] = outputs.argmax(-1 ).item()
return self.model.config.idalabel[idx]
| 519
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__UpperCamelCase : int = {
'configuration_pegasus_x': ['PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP', 'PegasusXConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : Any = [
'PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST',
'PegasusXForConditionalGeneration',
'PegasusXModel',
'PegasusXPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_pegasus_x import PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP, PegasusXConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_pegasus_x import (
PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST,
PegasusXForConditionalGeneration,
PegasusXModel,
PegasusXPreTrainedModel,
)
else:
import sys
__UpperCamelCase : int = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 519
| 1
|
import warnings
from ...utils import logging
from .image_processing_mobilevit import MobileViTImageProcessor
__a = logging.get_logger(__name__)
class A__ ( UpperCamelCase ):
"""simple docstring"""
def __init__( self : Dict , *lowerCAmelCase__ : Dict , **lowerCAmelCase__ : Any ) -> None:
"""simple docstring"""
warnings.warn(
"The class MobileViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
" Please use MobileViTImageProcessor instead." , lowerCAmelCase__ , )
super().__init__(*lowerCAmelCase__ , **lowerCAmelCase__ )
| 702
|
'''simple docstring'''
import argparse
import torch
from transformers import GPTaLMHeadModel, RobertaForMaskedLM
if __name__ == "__main__":
__a = argparse.ArgumentParser(
description=(
'Extraction some layers of the full RobertaForMaskedLM or GPT2LMHeadModel for Transfer Learned'
' Distillation'
)
)
parser.add_argument('--model_type', default='roberta', choices=['roberta', 'gpt2'])
parser.add_argument('--model_name', default='roberta-large', type=str)
parser.add_argument('--dump_checkpoint', default='serialization_dir/tf_roberta_048131723.pth', type=str)
parser.add_argument('--vocab_transform', action='store_true')
__a = parser.parse_args()
if args.model_type == "roberta":
__a = RobertaForMaskedLM.from_pretrained(args.model_name)
__a = 'roberta'
elif args.model_type == "gpt2":
__a = GPTaLMHeadModel.from_pretrained(args.model_name)
__a = 'transformer'
__a = model.state_dict()
__a = {}
# Embeddings #
if args.model_type == "gpt2":
for param_name in ["wte.weight", "wpe.weight"]:
__a = state_dict[f'{prefix}.{param_name}']
else:
for w in ["word_embeddings", "position_embeddings", "token_type_embeddings"]:
__a = f'{prefix}.embeddings.{w}.weight'
__a = state_dict[param_name]
for w in ["weight", "bias"]:
__a = f'{prefix}.embeddings.LayerNorm.{w}'
__a = state_dict[param_name]
# Transformer Blocks #
__a = 0
for teacher_idx in [0, 2, 4, 7, 9, 11]:
if args.model_type == "gpt2":
for layer in ["ln_1", "attn.c_attn", "attn.c_proj", "ln_2", "mlp.c_fc", "mlp.c_proj"]:
for w in ["weight", "bias"]:
__a = state_dict[
f'{prefix}.h.{teacher_idx}.{layer}.{w}'
]
__a = state_dict[f'{prefix}.h.{teacher_idx}.attn.bias']
else:
for layer in [
"attention.self.query",
"attention.self.key",
"attention.self.value",
"attention.output.dense",
"attention.output.LayerNorm",
"intermediate.dense",
"output.dense",
"output.LayerNorm",
]:
for w in ["weight", "bias"]:
__a = state_dict[
f'{prefix}.encoder.layer.{teacher_idx}.{layer}.{w}'
]
std_idx += 1
# Language Modeling Head ###s
if args.model_type == "roberta":
for layer in ["lm_head.decoder.weight", "lm_head.bias"]:
__a = state_dict[f'{layer}']
if args.vocab_transform:
for w in ["weight", "bias"]:
__a = state_dict[f'lm_head.dense.{w}']
__a = state_dict[f'lm_head.layer_norm.{w}']
elif args.model_type == "gpt2":
for w in ["weight", "bias"]:
__a = state_dict[f'{prefix}.ln_f.{w}']
__a = state_dict['lm_head.weight']
print(f'N layers selected for distillation: {std_idx}')
print(f'Number of params transferred for distillation: {len(compressed_sd.keys())}')
print(f'Save transferred checkpoint to {args.dump_checkpoint}.')
torch.save(compressed_sd, args.dump_checkpoint)
| 257
| 0
|
import argparse
import json
import os
from tensorflow.core.protobuf.saved_model_pba import SavedModel
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_copies.py
lowerCAmelCase__ = """."""
# Internal TensorFlow ops that can be safely ignored (mostly specific to a saved model)
lowerCAmelCase__ = [
"""Assert""",
"""AssignVariableOp""",
"""EmptyTensorList""",
"""MergeV2Checkpoints""",
"""ReadVariableOp""",
"""ResourceGather""",
"""RestoreV2""",
"""SaveV2""",
"""ShardedFilename""",
"""StatefulPartitionedCall""",
"""StaticRegexFullMatch""",
"""VarHandleOp""",
]
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: Union[str, Any] , SCREAMING_SNAKE_CASE_: Union[str, Any] , SCREAMING_SNAKE_CASE_: Dict ) -> Optional[int]:
'''simple docstring'''
A__ = SavedModel()
A__ = []
with open(os.path.join(SCREAMING_SNAKE_CASE_ , "utils" , "tf_ops" , "onnx.json" ) ) as f:
A__ = json.load(SCREAMING_SNAKE_CASE_ )["opsets"]
for i in range(1 , opset + 1 ):
onnx_ops.extend(onnx_opsets[str(SCREAMING_SNAKE_CASE_ )] )
with open(SCREAMING_SNAKE_CASE_ , "rb" ) as f:
saved_model.ParseFromString(f.read() )
A__ = set()
# Iterate over every metagraph in case there is more than one (a saved model can contain multiple graphs)
for meta_graph in saved_model.meta_graphs:
# Add operations in the graph definition
model_op_names.update(node.op for node in meta_graph.graph_def.node )
# Go through the functions in the graph definition
for func in meta_graph.graph_def.library.function:
# Add operations in each function
model_op_names.update(node.op for node in func.node_def )
# Convert to list, sorted if you want
A__ = sorted(SCREAMING_SNAKE_CASE_ )
A__ = []
for op in model_op_names:
if op not in onnx_ops and op not in INTERNAL_OPS:
incompatible_ops.append(SCREAMING_SNAKE_CASE_ )
if strict and len(SCREAMING_SNAKE_CASE_ ) > 0:
raise Exception(F'Found the following incompatible ops for the opset {opset}:\n' + incompatible_ops )
elif len(SCREAMING_SNAKE_CASE_ ) > 0:
print(F'Found the following incompatible ops for the opset {opset}:' )
print(*SCREAMING_SNAKE_CASE_ , sep="\n" )
else:
print(F'The saved model {saved_model_path} can properly be converted with ONNX.' )
if __name__ == "__main__":
lowerCAmelCase__ = argparse.ArgumentParser()
parser.add_argument("""--saved_model_path""", help="""Path of the saved model to check (the .pb file).""")
parser.add_argument(
"""--opset""", default=1_2, type=int, help="""The ONNX opset against which the model has to be tested."""
)
parser.add_argument(
"""--framework""", choices=["""onnx"""], default="""onnx""", help="""Frameworks against which to test the saved model."""
)
parser.add_argument(
"""--strict""", action="""store_true""", help="""Whether make the checking strict (raise errors) or not (raise warnings)"""
)
lowerCAmelCase__ = parser.parse_args()
if args.framework == "onnx":
onnx_compliancy(args.saved_model_path, args.strict, args.opset)
| 514
|
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: list ) -> list:
'''simple docstring'''
if len(SCREAMING_SNAKE_CASE_ ) <= 1:
return [tuple(SCREAMING_SNAKE_CASE_ )]
A__ = []
def generate(SCREAMING_SNAKE_CASE_: int , SCREAMING_SNAKE_CASE_: list ):
if k == 1:
res.append(tuple(arr[:] ) )
return
generate(k - 1 , SCREAMING_SNAKE_CASE_ )
for i in range(k - 1 ):
if k % 2 == 0: # k is even
A__ , A__ = arr[k - 1], arr[i]
else: # k is odd
A__ , A__ = arr[k - 1], arr[0]
generate(k - 1 , SCREAMING_SNAKE_CASE_ )
generate(len(SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ )
return res
if __name__ == "__main__":
lowerCAmelCase__ = input("""Enter numbers separated by a comma:\n""").strip()
lowerCAmelCase__ = [int(item) for item in user_input.split(""",""")]
print(heaps(arr))
| 514
| 1
|
from __future__ import annotations
_lowercase : Dict =[-10, -5, 0, 5, 5.1, 11, 13, 21, 3, 4, -21, -10, -5, -1, 0]
_lowercase : str =[-5, 0, 5, 5.1, 11, 13, 21, -1, 4, -1, -10, -5, -1, 0, -1]
def __UpperCAmelCase ( UpperCamelCase__ :list[float] ) -> list[float]:
snake_case__ : Union[str, Any] = []
snake_case__ : Union[str, Any] = len(UpperCamelCase__ )
for i in range(UpperCamelCase__ ):
snake_case__ : float = -1
for j in range(i + 1 , UpperCamelCase__ ):
if arr[i] < arr[j]:
snake_case__ : Dict = arr[j]
break
result.append(UpperCamelCase__ )
return result
def __UpperCAmelCase ( UpperCamelCase__ :list[float] ) -> list[float]:
snake_case__ : Tuple = []
for i, outer in enumerate(UpperCamelCase__ ):
snake_case__ : float = -1
for inner in arr[i + 1 :]:
if outer < inner:
snake_case__ : List[str] = inner
break
result.append(UpperCamelCase__ )
return result
def __UpperCAmelCase ( UpperCamelCase__ :list[float] ) -> list[float]:
snake_case__ : List[Any] = len(UpperCamelCase__ )
snake_case__ : list[float] = []
snake_case__ : list[float] = [-1] * arr_size
for index in reversed(range(UpperCamelCase__ ) ):
if stack:
while stack[-1] <= arr[index]:
stack.pop()
if not stack:
break
if stack:
snake_case__ : List[Any] = stack[-1]
stack.append(arr[index] )
return result
if __name__ == "__main__":
from doctest import testmod
from timeit import timeit
testmod()
print(next_greatest_element_slow(arr))
print(next_greatest_element_fast(arr))
print(next_greatest_element(arr))
_lowercase : Optional[int] =(
"from __main__ import arr, next_greatest_element_slow, "
"next_greatest_element_fast, next_greatest_element"
)
print(
"next_greatest_element_slow():",
timeit("next_greatest_element_slow(arr)", setup=setup),
)
print(
"next_greatest_element_fast():",
timeit("next_greatest_element_fast(arr)", setup=setup),
)
print(
" next_greatest_element():",
timeit("next_greatest_element(arr)", setup=setup),
)
| 718
|
'''simple docstring'''
import itertools
import string
from collections.abc import Generator, Iterable
def __UpperCAmelCase ( UpperCamelCase__ :Iterable[str] , UpperCamelCase__ :int ) -> Generator[tuple[str, ...], None, None]:
snake_case__ : Union[str, Any] = iter(UpperCamelCase__ )
while True:
snake_case__ : Tuple = tuple(itertools.islice(UpperCamelCase__ , UpperCamelCase__ ) )
if not chunk:
return
yield chunk
def __UpperCAmelCase ( UpperCamelCase__ :str ) -> str:
snake_case__ : str = ''''''.join([c.upper() for c in dirty if c in string.ascii_letters] )
snake_case__ : List[str] = ''''''
if len(UpperCamelCase__ ) < 2:
return dirty
for i in range(len(UpperCamelCase__ ) - 1 ):
clean += dirty[i]
if dirty[i] == dirty[i + 1]:
clean += "X"
clean += dirty[-1]
if len(UpperCamelCase__ ) & 1:
clean += "X"
return clean
def __UpperCAmelCase ( UpperCamelCase__ :str ) -> list[str]:
# I and J are used interchangeably to allow
# us to use a 5x5 table (25 letters)
snake_case__ : List[str] = '''ABCDEFGHIKLMNOPQRSTUVWXYZ'''
# we're using a list instead of a '2d' array because it makes the math
# for setting up the table and doing the actual encoding/decoding simpler
snake_case__ : Union[str, Any] = []
# copy key chars into the table if they are in `alphabet` ignoring duplicates
for char in key.upper():
if char not in table and char in alphabet:
table.append(UpperCamelCase__ )
# fill the rest of the table in with the remaining alphabet chars
for char in alphabet:
if char not in table:
table.append(UpperCamelCase__ )
return table
def __UpperCAmelCase ( UpperCamelCase__ :str , UpperCamelCase__ :str ) -> str:
snake_case__ : List[str] = generate_table(UpperCamelCase__ )
snake_case__ : List[str] = prepare_input(UpperCamelCase__ )
snake_case__ : int = ''''''
# https://en.wikipedia.org/wiki/Playfair_cipher#Description
for chara, chara in chunker(UpperCamelCase__ , 2 ):
snake_case__ , snake_case__ : List[str] = divmod(table.index(UpperCamelCase__ ) , 5 )
snake_case__ , snake_case__ : Tuple = divmod(table.index(UpperCamelCase__ ) , 5 )
if rowa == rowa:
ciphertext += table[rowa * 5 + (cola + 1) % 5]
ciphertext += table[rowa * 5 + (cola + 1) % 5]
elif cola == cola:
ciphertext += table[((rowa + 1) % 5) * 5 + cola]
ciphertext += table[((rowa + 1) % 5) * 5 + cola]
else: # rectangle
ciphertext += table[rowa * 5 + cola]
ciphertext += table[rowa * 5 + cola]
return ciphertext
def __UpperCAmelCase ( UpperCamelCase__ :str , UpperCamelCase__ :str ) -> str:
snake_case__ : List[Any] = generate_table(UpperCamelCase__ )
snake_case__ : Union[str, Any] = ''''''
# https://en.wikipedia.org/wiki/Playfair_cipher#Description
for chara, chara in chunker(UpperCamelCase__ , 2 ):
snake_case__ , snake_case__ : Union[str, Any] = divmod(table.index(UpperCamelCase__ ) , 5 )
snake_case__ , snake_case__ : Optional[Any] = divmod(table.index(UpperCamelCase__ ) , 5 )
if rowa == rowa:
plaintext += table[rowa * 5 + (cola - 1) % 5]
plaintext += table[rowa * 5 + (cola - 1) % 5]
elif cola == cola:
plaintext += table[((rowa - 1) % 5) * 5 + cola]
plaintext += table[((rowa - 1) % 5) * 5 + cola]
else: # rectangle
plaintext += table[rowa * 5 + cola]
plaintext += table[rowa * 5 + cola]
return plaintext
| 574
| 0
|
'''simple docstring'''
import json
import os
from typing import Dict, List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
__lowerCamelCase : Optional[Any] = logging.get_logger(__name__)
__lowerCamelCase : List[Any] = {
"""vocab_file""": """vocab.json""",
"""tokenizer_config_file""": """tokenizer_config.json""",
"""merges_file""": """merges.txt""",
}
__lowerCamelCase : Dict = {
"""vocab_file""": {
"""facebook/s2t-wav2vec2-large-en-de""": (
"""https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/vocab.json"""
),
},
"""tokenizer_config_file""": {
"""facebook/s2t-wav2vec2-large-en-de""": (
"""https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/tokenizer_config.json"""
),
},
"""merges_file""": {
"""facebook/s2t-wav2vec2-large-en-de""": (
"""https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/merges.txt"""
),
},
}
__lowerCamelCase : str = """</w>"""
__lowerCamelCase : Any = """@@ """
def __snake_case (__UpperCAmelCase ):
"""simple docstring"""
lowerCamelCase_ : Dict = set()
lowerCamelCase_ : Union[str, Any] = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
lowerCamelCase_ : Union[str, Any] = char
return pairs
# Speech2Text2 has no max input length
__lowerCamelCase : int = {"""facebook/s2t-wav2vec2-large-en-de""": 1024}
class lowerCAmelCase__ ( _lowerCAmelCase ):
A = VOCAB_FILES_NAMES
A = PRETRAINED_VOCAB_FILES_MAP
A = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
A = ["input_ids", "attention_mask"]
def __init__( self : Dict , UpperCamelCase_ : List[str] , UpperCamelCase_ : Optional[int]="<s>" , UpperCamelCase_ : str="<pad>" , UpperCamelCase_ : Optional[int]="</s>" , UpperCamelCase_ : List[str]="<unk>" , UpperCamelCase_ : List[str]=False , UpperCamelCase_ : Optional[Any]=None , **UpperCamelCase_ : int , ) -> Optional[Any]:
"""simple docstring"""
super().__init__(
unk_token=UpperCamelCase_ , bos_token=UpperCamelCase_ , eos_token=UpperCamelCase_ , pad_token=UpperCamelCase_ , do_lower_case=UpperCamelCase_ , **UpperCamelCase_ , )
lowerCamelCase_ : Tuple = do_lower_case
with open(UpperCamelCase_ , encoding='''utf-8''' ) as vocab_handle:
lowerCamelCase_ : List[Any] = json.load(UpperCamelCase_ )
lowerCamelCase_ : int = {v: k for k, v in self.encoder.items()}
if merges_file is None:
logger.info(F"""No merges files provided. {self.__class__.__name__} can only be used for decoding.""" )
lowerCamelCase_ : Dict = None
lowerCamelCase_ : Optional[Any] = None
else:
with open(UpperCamelCase_ , encoding='''utf-8''' ) as merges_handle:
lowerCamelCase_ : List[str] = merges_handle.read().split('''\n''' )[:-1]
lowerCamelCase_ : Any = [tuple(merge.split()[:2] ) for merge in merges]
lowerCamelCase_ : List[Any] = dict(zip(UpperCamelCase_ , range(len(UpperCamelCase_ ) ) ) )
lowerCamelCase_ : Optional[Any] = {}
@property
def __UpperCamelCase ( self : List[str] ) -> int:
"""simple docstring"""
return len(self.decoder )
def __UpperCamelCase ( self : Union[str, Any] ) -> Dict:
"""simple docstring"""
return dict(self.encoder , **self.added_tokens_encoder )
def __UpperCamelCase ( self : List[Any] , UpperCamelCase_ : Optional[int] ) -> Tuple:
"""simple docstring"""
lowerCamelCase_ : str = tuple(token[:-1] ) + (token[-1] + BPE_TOKEN_MERGES,)
if token in self.cache:
return self.cache[token]
lowerCamelCase_ : Union[str, Any] = get_pairs(UpperCamelCase_ )
if not pairs:
return token
while True:
lowerCamelCase_ : Optional[int] = min(UpperCamelCase_ , key=lambda UpperCamelCase_ : self.bpe_ranks.get(UpperCamelCase_ , float('''inf''' ) ) )
if bigram not in self.bpe_ranks:
break
lowerCamelCase_ , lowerCamelCase_ : Optional[int] = bigram
lowerCamelCase_ : str = []
lowerCamelCase_ : Optional[int] = 0
while i < len(UpperCamelCase_ ):
try:
lowerCamelCase_ : Dict = word.index(UpperCamelCase_ , UpperCamelCase_ )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
lowerCamelCase_ : Tuple = j
if word[i] == first and i < len(UpperCamelCase_ ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
lowerCamelCase_ : str = tuple(UpperCamelCase_ )
lowerCamelCase_ : str = new_word
if len(UpperCamelCase_ ) == 1:
break
else:
lowerCamelCase_ : Any = get_pairs(UpperCamelCase_ )
lowerCamelCase_ : List[str] = ''' '''.join(UpperCamelCase_ )
if word == "\n " + BPE_TOKEN_MERGES:
lowerCamelCase_ : Dict = '''\n''' + BPE_TOKEN_MERGES
if word.endswith(UpperCamelCase_ ):
lowerCamelCase_ : Any = word.replace(UpperCamelCase_ , '''''' )
lowerCamelCase_ : Tuple = word.replace(''' ''' , UpperCamelCase_ )
lowerCamelCase_ : Optional[Any] = word
return word
def __UpperCamelCase ( self : str , UpperCamelCase_ : Tuple ) -> Union[str, Any]:
"""simple docstring"""
if self.bpe_ranks is None:
raise ValueError(
'''This tokenizer was instantiated without a `merges.txt` file, so'''
''' that it can only be used for decoding, not for encoding.'''
'''Make sure to provide `merges.txt` file at instantiation to enable '''
'''encoding.''' )
if self.do_lower_case:
lowerCamelCase_ : List[Any] = text.lower()
lowerCamelCase_ : List[str] = text.split()
lowerCamelCase_ : Dict = []
for token in text:
if token:
split_tokens.extend(list(self.bpe(UpperCamelCase_ ).split(''' ''' ) ) )
return split_tokens
def __UpperCamelCase ( self : Dict , UpperCamelCase_ : str ) -> int:
"""simple docstring"""
return self.encoder.get(UpperCamelCase_ , self.encoder.get(self.unk_token ) )
def __UpperCamelCase ( self : Tuple , UpperCamelCase_ : int ) -> str:
"""simple docstring"""
lowerCamelCase_ : Any = self.decoder.get(UpperCamelCase_ , self.unk_token )
return result
def __UpperCamelCase ( self : List[str] , UpperCamelCase_ : List[str] ) -> str:
"""simple docstring"""
lowerCamelCase_ : str = ''' '''.join(UpperCamelCase_ )
# make sure @@ tokens are concatenated
lowerCamelCase_ : int = ''''''.join(string.split(UpperCamelCase_ ) )
return string
def __UpperCamelCase ( self : Optional[Any] , UpperCamelCase_ : str , UpperCamelCase_ : Optional[str] = None ) -> Tuple[str]:
"""simple docstring"""
if not os.path.isdir(UpperCamelCase_ ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
lowerCamelCase_ : Optional[Any] = os.path.join(
UpperCamelCase_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
lowerCamelCase_ : Optional[Any] = os.path.join(
UpperCamelCase_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''merges_file'''] )
with open(UpperCamelCase_ , '''w''' , encoding='''utf-8''' ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=UpperCamelCase_ , ensure_ascii=UpperCamelCase_ ) + '''\n''' )
lowerCamelCase_ : Optional[Any] = 0
if self.bpe_ranks is None:
return (vocab_file,)
with open(UpperCamelCase_ , '''w''' , encoding='''utf-8''' ) as writer:
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda UpperCamelCase_ : kv[1] ):
if index != token_index:
logger.warning(
F"""Saving vocabulary to {merges_file}: BPE merge indices are not consecutive."""
''' Please check that the tokenizer is not corrupted!''' )
lowerCamelCase_ : Union[str, Any] = token_index
writer.write(''' '''.join(UpperCamelCase_ ) + '''\n''' )
index += 1
return (vocab_file, merges_file)
| 501
|
'''simple docstring'''
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_gpta import GPTaTokenizer
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
__lowerCamelCase : Optional[int] = logging.get_logger(__name__)
__lowerCamelCase : List[Any] = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt""", """tokenizer_file""": """tokenizer.json"""}
__lowerCamelCase : Tuple = {
"""vocab_file""": {
"""gpt2""": """https://huggingface.co/gpt2/resolve/main/vocab.json""",
"""gpt2-medium""": """https://huggingface.co/gpt2-medium/resolve/main/vocab.json""",
"""gpt2-large""": """https://huggingface.co/gpt2-large/resolve/main/vocab.json""",
"""gpt2-xl""": """https://huggingface.co/gpt2-xl/resolve/main/vocab.json""",
"""distilgpt2""": """https://huggingface.co/distilgpt2/resolve/main/vocab.json""",
},
"""merges_file""": {
"""gpt2""": """https://huggingface.co/gpt2/resolve/main/merges.txt""",
"""gpt2-medium""": """https://huggingface.co/gpt2-medium/resolve/main/merges.txt""",
"""gpt2-large""": """https://huggingface.co/gpt2-large/resolve/main/merges.txt""",
"""gpt2-xl""": """https://huggingface.co/gpt2-xl/resolve/main/merges.txt""",
"""distilgpt2""": """https://huggingface.co/distilgpt2/resolve/main/merges.txt""",
},
"""tokenizer_file""": {
"""gpt2""": """https://huggingface.co/gpt2/resolve/main/tokenizer.json""",
"""gpt2-medium""": """https://huggingface.co/gpt2-medium/resolve/main/tokenizer.json""",
"""gpt2-large""": """https://huggingface.co/gpt2-large/resolve/main/tokenizer.json""",
"""gpt2-xl""": """https://huggingface.co/gpt2-xl/resolve/main/tokenizer.json""",
"""distilgpt2""": """https://huggingface.co/distilgpt2/resolve/main/tokenizer.json""",
},
}
__lowerCamelCase : str = {
"""gpt2""": 1024,
"""gpt2-medium""": 1024,
"""gpt2-large""": 1024,
"""gpt2-xl""": 1024,
"""distilgpt2""": 1024,
}
class lowerCAmelCase__ ( _lowerCAmelCase ):
A = VOCAB_FILES_NAMES
A = PRETRAINED_VOCAB_FILES_MAP
A = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
A = ["input_ids", "attention_mask"]
A = GPTaTokenizer
def __init__( self : Any , UpperCamelCase_ : Dict=None , UpperCamelCase_ : Union[str, Any]=None , UpperCamelCase_ : Tuple=None , UpperCamelCase_ : List[Any]="<|endoftext|>" , UpperCamelCase_ : Dict="<|endoftext|>" , UpperCamelCase_ : Any="<|endoftext|>" , UpperCamelCase_ : Optional[int]=False , **UpperCamelCase_ : Dict , ) -> Optional[int]:
"""simple docstring"""
super().__init__(
UpperCamelCase_ , UpperCamelCase_ , tokenizer_file=UpperCamelCase_ , unk_token=UpperCamelCase_ , bos_token=UpperCamelCase_ , eos_token=UpperCamelCase_ , add_prefix_space=UpperCamelCase_ , **UpperCamelCase_ , )
lowerCamelCase_ : Any = kwargs.pop('''add_bos_token''' , UpperCamelCase_ )
lowerCamelCase_ : Dict = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get('''add_prefix_space''' , UpperCamelCase_ ) != add_prefix_space:
lowerCamelCase_ : List[str] = getattr(UpperCamelCase_ , pre_tok_state.pop('''type''' ) )
lowerCamelCase_ : int = add_prefix_space
lowerCamelCase_ : Tuple = pre_tok_class(**UpperCamelCase_ )
lowerCamelCase_ : List[str] = add_prefix_space
def __UpperCamelCase ( self : Union[str, Any] , *UpperCamelCase_ : Any , **UpperCamelCase_ : Union[str, Any] ) -> BatchEncoding:
"""simple docstring"""
lowerCamelCase_ : Optional[Any] = kwargs.get('''is_split_into_words''' , UpperCamelCase_ )
assert self.add_prefix_space or not is_split_into_words, (
F"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
"to use it with pretokenized inputs."
)
return super()._batch_encode_plus(*UpperCamelCase_ , **UpperCamelCase_ )
def __UpperCamelCase ( self : Dict , *UpperCamelCase_ : Tuple , **UpperCamelCase_ : Optional[Any] ) -> BatchEncoding:
"""simple docstring"""
lowerCamelCase_ : Optional[Any] = kwargs.get('''is_split_into_words''' , UpperCamelCase_ )
assert self.add_prefix_space or not is_split_into_words, (
F"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
"to use it with pretokenized inputs."
)
return super()._encode_plus(*UpperCamelCase_ , **UpperCamelCase_ )
def __UpperCamelCase ( self : Union[str, Any] , UpperCamelCase_ : str , UpperCamelCase_ : Optional[str] = None ) -> Tuple[str]:
"""simple docstring"""
lowerCamelCase_ : List[str] = self._tokenizer.model.save(UpperCamelCase_ , name=UpperCamelCase_ )
return tuple(UpperCamelCase_ )
def __UpperCamelCase ( self : Optional[int] , UpperCamelCase_ : "Conversation" ) -> List[int]:
"""simple docstring"""
lowerCamelCase_ : Union[str, Any] = []
for is_user, text in conversation.iter_texts():
input_ids.extend(self.encode(UpperCamelCase_ , add_special_tokens=UpperCamelCase_ ) + [self.eos_token_id] )
if len(UpperCamelCase_ ) > self.model_max_length:
lowerCamelCase_ : List[str] = input_ids[-self.model_max_length :]
return input_ids
| 501
| 1
|
import argparse
import os
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_task_guides.py
_lowerCamelCase : List[Any] = """src/transformers"""
_lowerCamelCase : Optional[Any] = """docs/source/en/tasks"""
def _SCREAMING_SNAKE_CASE ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) -> Union[str, Any]:
with open(lowerCAmelCase__ , 'r' , encoding='utf-8' , newline='\n' ) as f:
SCREAMING_SNAKE_CASE : Optional[int] = f.readlines()
# Find the start prompt.
SCREAMING_SNAKE_CASE : Optional[int] = 0
while not lines[start_index].startswith(lowerCAmelCase__ ):
start_index += 1
start_index += 1
SCREAMING_SNAKE_CASE : int = start_index
while not lines[end_index].startswith(lowerCAmelCase__ ):
end_index += 1
end_index -= 1
while len(lines[start_index] ) <= 1:
start_index += 1
while len(lines[end_index] ) <= 1:
end_index -= 1
end_index += 1
return "".join(lines[start_index:end_index] ), start_index, end_index, lines
# This is to make sure the transformers module imported is the one in the repo.
_lowerCamelCase : str = direct_transformers_import(TRANSFORMERS_PATH)
_lowerCamelCase : Dict = {
"""asr.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_CTC_MAPPING_NAMES,
"""audio_classification.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES,
"""language_modeling.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_CAUSAL_LM_MAPPING_NAMES,
"""image_classification.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES,
"""masked_language_modeling.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_MASKED_LM_MAPPING_NAMES,
"""multiple_choice.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES,
"""object_detection.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES,
"""question_answering.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES,
"""semantic_segmentation.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING_NAMES,
"""sequence_classification.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES,
"""summarization.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
"""token_classification.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES,
"""translation.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
"""video_classification.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES,
"""document_question_answering.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES,
"""monocular_depth_estimation.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES,
}
# This list contains model types used in some task guides that are not in `CONFIG_MAPPING_NAMES` (therefore not in any
# `MODEL_MAPPING_NAMES` or any `MODEL_FOR_XXX_MAPPING_NAMES`).
_lowerCamelCase : Optional[int] = {
"""summarization.md""": ("""nllb""",),
"""translation.md""": ("""nllb""",),
}
def _SCREAMING_SNAKE_CASE ( __lowerCAmelCase ) -> Tuple:
SCREAMING_SNAKE_CASE : List[str] = TASK_GUIDE_TO_MODELS[task_guide]
SCREAMING_SNAKE_CASE : Union[str, Any] = SPECIAL_TASK_GUIDE_TO_MODEL_TYPES.get(lowerCAmelCase__ , set() )
SCREAMING_SNAKE_CASE : Optional[int] = {
code: name
for code, name in transformers_module.MODEL_NAMES_MAPPING.items()
if (code in model_maping_names or code in special_model_types)
}
return ", ".join([F'''[{name}](../model_doc/{code})''' for code, name in model_names.items()] ) + "\n"
def _SCREAMING_SNAKE_CASE ( __lowerCAmelCase , __lowerCAmelCase=False ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE : Dict = _find_text_in_file(
filename=os.path.join(lowerCAmelCase__ , lowerCAmelCase__ ) , start_prompt='<!--This tip is automatically generated by `make fix-copies`, do not fill manually!-->' , end_prompt='<!--End of the generated tip-->' , )
SCREAMING_SNAKE_CASE : Dict = get_model_list_for_task(lowerCAmelCase__ )
if current_list != new_list:
if overwrite:
with open(os.path.join(lowerCAmelCase__ , lowerCAmelCase__ ) , 'w' , encoding='utf-8' , newline='\n' ) as f:
f.writelines(lines[:start_index] + [new_list] + lines[end_index:] )
else:
raise ValueError(
F'''The list of models that can be used in the {task_guide} guide needs an update. Run `make fix-copies`'''
' to fix this.' )
if __name__ == "__main__":
_lowerCamelCase : List[str] = argparse.ArgumentParser()
parser.add_argument("""--fix_and_overwrite""", action="""store_true""", help="""Whether to fix inconsistencies.""")
_lowerCamelCase : Optional[Any] = parser.parse_args()
for task_guide in TASK_GUIDE_TO_MODELS.keys():
check_model_list_for_task(task_guide, args.fix_and_overwrite)
| 709
|
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DeformableDetrImageProcessor
class lowercase ( unittest.TestCase):
'''simple docstring'''
def __init__( self : List[Any] , snake_case : Union[str, Any] , snake_case : Optional[int]=7 , snake_case : Tuple=3 , snake_case : List[Any]=30 , snake_case : Union[str, Any]=400 , snake_case : Optional[int]=True , snake_case : Tuple=None , snake_case : List[Any]=True , snake_case : Dict=[0.5, 0.5, 0.5] , snake_case : List[Any]=[0.5, 0.5, 0.5] , snake_case : str=True , snake_case : Any=1 / 255 , snake_case : Optional[int]=True , ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = size if size is not None else {'shortest_edge': 18, 'longest_edge': 1333}
SCREAMING_SNAKE_CASE : Union[str, Any] = parent
SCREAMING_SNAKE_CASE : int = batch_size
SCREAMING_SNAKE_CASE : str = num_channels
SCREAMING_SNAKE_CASE : int = min_resolution
SCREAMING_SNAKE_CASE : List[str] = max_resolution
SCREAMING_SNAKE_CASE : Any = do_resize
SCREAMING_SNAKE_CASE : List[Any] = size
SCREAMING_SNAKE_CASE : Optional[Any] = do_normalize
SCREAMING_SNAKE_CASE : Dict = image_mean
SCREAMING_SNAKE_CASE : Dict = image_std
SCREAMING_SNAKE_CASE : Dict = do_rescale
SCREAMING_SNAKE_CASE : int = rescale_factor
SCREAMING_SNAKE_CASE : Optional[int] = do_pad
def lowerCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
def lowerCamelCase_ ( self : Optional[int] , snake_case : List[str] , snake_case : Dict=False ):
'''simple docstring'''
if not batched:
SCREAMING_SNAKE_CASE : Optional[int] = image_inputs[0]
if isinstance(snake_case , Image.Image ):
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Any = image.size
else:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Dict = image.shape[1], image.shape[2]
if w < h:
SCREAMING_SNAKE_CASE : int = int(self.size['shortest_edge'] * h / w )
SCREAMING_SNAKE_CASE : int = self.size['shortest_edge']
elif w > h:
SCREAMING_SNAKE_CASE : int = self.size['shortest_edge']
SCREAMING_SNAKE_CASE : Optional[int] = int(self.size['shortest_edge'] * w / h )
else:
SCREAMING_SNAKE_CASE : Optional[Any] = self.size['shortest_edge']
SCREAMING_SNAKE_CASE : int = self.size['shortest_edge']
else:
SCREAMING_SNAKE_CASE : Tuple = []
for image in image_inputs:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : List[str] = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
SCREAMING_SNAKE_CASE : int = max(snake_case , key=lambda snake_case : item[0] )[0]
SCREAMING_SNAKE_CASE : Optional[int] = max(snake_case , key=lambda snake_case : item[1] )[1]
return expected_height, expected_width
@require_torch
@require_vision
class lowercase ( SCREAMING_SNAKE_CASE_ , unittest.TestCase):
'''simple docstring'''
UpperCAmelCase : Optional[Any] = DeformableDetrImageProcessor if is_vision_available() else None
def lowerCamelCase_ ( self : List[str] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = DeformableDetrImageProcessingTester(self )
@property
def lowerCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def lowerCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(snake_case , 'image_mean' ) )
self.assertTrue(hasattr(snake_case , 'image_std' ) )
self.assertTrue(hasattr(snake_case , 'do_normalize' ) )
self.assertTrue(hasattr(snake_case , 'do_resize' ) )
self.assertTrue(hasattr(snake_case , 'do_rescale' ) )
self.assertTrue(hasattr(snake_case , 'do_pad' ) )
self.assertTrue(hasattr(snake_case , 'size' ) )
def lowerCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'shortest_edge': 18, 'longest_edge': 1333} )
self.assertEqual(image_processor.do_pad , snake_case )
SCREAMING_SNAKE_CASE : Tuple = self.image_processing_class.from_dict(
self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=snake_case )
self.assertEqual(image_processor.size , {'shortest_edge': 42, 'longest_edge': 84} )
self.assertEqual(image_processor.do_pad , snake_case )
def lowerCamelCase_ ( self : str ):
'''simple docstring'''
pass
def lowerCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
SCREAMING_SNAKE_CASE : Tuple = prepare_image_inputs(self.image_processor_tester , equal_resolution=snake_case )
for image in image_inputs:
self.assertIsInstance(snake_case , Image.Image )
# Test not batched input
SCREAMING_SNAKE_CASE : Optional[int] = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : int = self.image_processor_tester.get_expected_values(snake_case )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Any = self.image_processor_tester.get_expected_values(snake_case , batched=snake_case )
SCREAMING_SNAKE_CASE : Any = image_processing(snake_case , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def lowerCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
SCREAMING_SNAKE_CASE : Dict = prepare_image_inputs(self.image_processor_tester , equal_resolution=snake_case , numpify=snake_case )
for image in image_inputs:
self.assertIsInstance(snake_case , np.ndarray )
# Test not batched input
SCREAMING_SNAKE_CASE : int = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Dict = self.image_processor_tester.get_expected_values(snake_case )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
SCREAMING_SNAKE_CASE : List[Any] = image_processing(snake_case , return_tensors='pt' ).pixel_values
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Union[str, Any] = self.image_processor_tester.get_expected_values(snake_case , batched=snake_case )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def lowerCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
SCREAMING_SNAKE_CASE : int = prepare_image_inputs(self.image_processor_tester , equal_resolution=snake_case , torchify=snake_case )
for image in image_inputs:
self.assertIsInstance(snake_case , torch.Tensor )
# Test not batched input
SCREAMING_SNAKE_CASE : Optional[Any] = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Union[str, Any] = self.image_processor_tester.get_expected_values(snake_case )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
SCREAMING_SNAKE_CASE : Union[str, Any] = image_processing(snake_case , return_tensors='pt' ).pixel_values
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Any = self.image_processor_tester.get_expected_values(snake_case , batched=snake_case )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
@slow
def lowerCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
with open('./tests/fixtures/tests_samples/COCO/coco_annotations.txt' , 'r' ) as f:
SCREAMING_SNAKE_CASE : str = json.loads(f.read() )
SCREAMING_SNAKE_CASE : Optional[int] = {'image_id': 39769, 'annotations': target}
# encode them
SCREAMING_SNAKE_CASE : str = DeformableDetrImageProcessor()
SCREAMING_SNAKE_CASE : int = image_processing(images=snake_case , annotations=snake_case , return_tensors='pt' )
# verify pixel values
SCREAMING_SNAKE_CASE : int = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding['pixel_values'].shape , snake_case )
SCREAMING_SNAKE_CASE : int = torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3] , snake_case , atol=1E-4 ) )
# verify area
SCREAMING_SNAKE_CASE : str = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438] )
self.assertTrue(torch.allclose(encoding['labels'][0]['area'] , snake_case ) )
# verify boxes
SCREAMING_SNAKE_CASE : Dict = torch.Size([6, 4] )
self.assertEqual(encoding['labels'][0]['boxes'].shape , snake_case )
SCREAMING_SNAKE_CASE : Optional[int] = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215] )
self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0] , snake_case , atol=1E-3 ) )
# verify image_id
SCREAMING_SNAKE_CASE : int = torch.tensor([39769] )
self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'] , snake_case ) )
# verify is_crowd
SCREAMING_SNAKE_CASE : Any = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'] , snake_case ) )
# verify class_labels
SCREAMING_SNAKE_CASE : str = torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'] , snake_case ) )
# verify orig_size
SCREAMING_SNAKE_CASE : Dict = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'] , snake_case ) )
# verify size
SCREAMING_SNAKE_CASE : Union[str, Any] = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding['labels'][0]['size'] , snake_case ) )
@slow
def lowerCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
with open('./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt' , 'r' ) as f:
SCREAMING_SNAKE_CASE : Optional[int] = json.loads(f.read() )
SCREAMING_SNAKE_CASE : List[str] = {'file_name': '000000039769.png', 'image_id': 39769, 'segments_info': target}
SCREAMING_SNAKE_CASE : Optional[Any] = pathlib.Path('./tests/fixtures/tests_samples/COCO/coco_panoptic' )
# encode them
SCREAMING_SNAKE_CASE : Union[str, Any] = DeformableDetrImageProcessor(format='coco_panoptic' )
SCREAMING_SNAKE_CASE : Optional[Any] = image_processing(images=snake_case , annotations=snake_case , masks_path=snake_case , return_tensors='pt' )
# verify pixel values
SCREAMING_SNAKE_CASE : str = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding['pixel_values'].shape , snake_case )
SCREAMING_SNAKE_CASE : List[str] = torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3] , snake_case , atol=1E-4 ) )
# verify area
SCREAMING_SNAKE_CASE : Any = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147] )
self.assertTrue(torch.allclose(encoding['labels'][0]['area'] , snake_case ) )
# verify boxes
SCREAMING_SNAKE_CASE : Union[str, Any] = torch.Size([6, 4] )
self.assertEqual(encoding['labels'][0]['boxes'].shape , snake_case )
SCREAMING_SNAKE_CASE : List[Any] = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625] )
self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0] , snake_case , atol=1E-3 ) )
# verify image_id
SCREAMING_SNAKE_CASE : int = torch.tensor([39769] )
self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'] , snake_case ) )
# verify is_crowd
SCREAMING_SNAKE_CASE : Optional[int] = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'] , snake_case ) )
# verify class_labels
SCREAMING_SNAKE_CASE : List[Any] = torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'] , snake_case ) )
# verify masks
SCREAMING_SNAKE_CASE : Tuple = 822873
self.assertEqual(encoding['labels'][0]['masks'].sum().item() , snake_case )
# verify orig_size
SCREAMING_SNAKE_CASE : Optional[int] = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'] , snake_case ) )
# verify size
SCREAMING_SNAKE_CASE : Union[str, Any] = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding['labels'][0]['size'] , snake_case ) )
| 308
| 0
|
from typing import List
import datasets
from datasets.tasks import AudioClassification
from ..folder_based_builder import folder_based_builder
A_ : str = datasets.utils.logging.get_logger(__name__)
class _lowerCAmelCase( folder_based_builder.FolderBasedBuilderConfig ):
"""simple docstring"""
a : bool =None
a : bool =None
class _lowerCAmelCase( folder_based_builder.FolderBasedBuilder ):
"""simple docstring"""
a : str =datasets.Audio()
a : List[Any] ='''audio'''
a : Optional[Any] =AudioFolderConfig
a : List[str] # definition at the bottom of the script
a : str =AudioClassification(audio_column='''audio''' , label_column='''label''' )
A_ : Optional[Any] = [
'.aiff',
'.au',
'.avr',
'.caf',
'.flac',
'.htk',
'.svx',
'.mat4',
'.mat5',
'.mpc2k',
'.ogg',
'.paf',
'.pvf',
'.raw',
'.rf64',
'.sd2',
'.sds',
'.ircam',
'.voc',
'.w64',
'.wav',
'.nist',
'.wavex',
'.wve',
'.xi',
'.mp3',
'.opus',
]
A_ : Tuple = AUDIO_EXTENSIONS
| 57
|
lowercase__ : Dict = [sum(int(c, 10) ** 2 for c in i.__str__()) for i in range(100_000)]
def SCREAMING_SNAKE_CASE ( __UpperCamelCase) -> int:
a = 0
while number:
# Increased Speed Slightly by checking every 5 digits together.
sum_of_digits_squared += DIGITS_SQUARED[number % 10_00_00]
number //= 10_00_00
return sum_of_digits_squared
# There are 2 Chains made,
# One ends with 89 with the chain member 58 being the one which when declared first,
# there will be the least number of iterations for all the members to be checked.
# The other one ends with 1 and has only one element 1.
# So 58 and 1 are chosen to be declared at the starting.
# Changed dictionary to an array to quicken the solution
lowercase__ : list[bool | None] = [None] * 10_000_000
lowercase__ : Optional[int] = True
lowercase__ : List[str] = False
def SCREAMING_SNAKE_CASE ( __UpperCamelCase) -> bool:
if CHAINS[number - 1] is not None:
return CHAINS[number - 1] # type: ignore
a = chain(next_number(__UpperCamelCase))
a = number_chain
while number < 10_00_00_00:
a = number_chain
number *= 10
return number_chain
def SCREAMING_SNAKE_CASE ( __UpperCamelCase = 10_00_00_00) -> int:
for i in range(1 , __UpperCamelCase):
if CHAINS[i] is None:
chain(i + 1)
return CHAINS[:number].count(__UpperCamelCase)
if __name__ == "__main__":
import doctest
doctest.testmod()
print(F'{solution() = }')
| 515
| 0
|
_A = 8.314_4598
def lowercase_ ( A__ , A__ ) -> float:
"""simple docstring"""
if temperature < 0:
raise Exception("Temperature cannot be less than 0 K" )
if molar_mass <= 0:
raise Exception("Molar mass cannot be less than or equal to 0 kg/mol" )
else:
return (3 * UNIVERSAL_GAS_CONSTANT * temperature / molar_mass) ** 0.5
if __name__ == "__main__":
import doctest
# run doctest
doctest.testmod()
# example
_A = 3_00
_A = 28
_A = rms_speed_of_molecule(temperature, molar_mass)
print(f"Vrms of Nitrogen gas at 300 K is {vrms} m/s")
| 294
|
from binascii import hexlify
from hashlib import shaaaa
from os import urandom
# RFC 3526 - More Modular Exponential (MODP) Diffie-Hellman groups for
# Internet Key Exchange (IKE) https://tools.ietf.org/html/rfc3526
_A = {
# 1536-bit
5: {
"prime": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"
+ "29024E088A67CC74020BBEA63B139B22514A08798E3404DD"
+ "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"
+ "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"
+ "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"
+ "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"
+ "83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA237327FFFFFFFFFFFFFFFF",
base=16,
),
"generator": 2,
},
# 2048-bit
14: {
"prime": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"
+ "29024E088A67CC74020BBEA63B139B22514A08798E3404DD"
+ "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"
+ "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"
+ "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"
+ "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"
+ "83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B"
+ "E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9"
+ "DE2BCBF6955817183995497CEA956AE515D2261898FA0510"
+ "15728E5A8AACAA68FFFFFFFFFFFFFFFF",
base=16,
),
"generator": 2,
},
# 3072-bit
15: {
"prime": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"
+ "29024E088A67CC74020BBEA63B139B22514A08798E3404DD"
+ "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"
+ "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"
+ "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"
+ "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"
+ "83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B"
+ "E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9"
+ "DE2BCBF6955817183995497CEA956AE515D2261898FA0510"
+ "15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64"
+ "ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7"
+ "ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B"
+ "F12FFA06D98A0864D87602733EC86A64521F2B18177B200C"
+ "BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31"
+ "43DB5BFCE0FD108E4B82D120A93AD2CAFFFFFFFFFFFFFFFF",
base=16,
),
"generator": 2,
},
# 4096-bit
16: {
"prime": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"
+ "29024E088A67CC74020BBEA63B139B22514A08798E3404DD"
+ "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"
+ "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"
+ "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"
+ "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"
+ "83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B"
+ "E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9"
+ "DE2BCBF6955817183995497CEA956AE515D2261898FA0510"
+ "15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64"
+ "ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7"
+ "ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B"
+ "F12FFA06D98A0864D87602733EC86A64521F2B18177B200C"
+ "BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31"
+ "43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7"
+ "88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA"
+ "2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6"
+ "287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED"
+ "1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9"
+ "93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934063199"
+ "FFFFFFFFFFFFFFFF",
base=16,
),
"generator": 2,
},
# 6144-bit
17: {
"prime": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E08"
+ "8A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B"
+ "302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9"
+ "A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE6"
+ "49286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8"
+ "FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C"
+ "180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF695581718"
+ "3995497CEA956AE515D2261898FA051015728E5A8AAAC42DAD33170D"
+ "04507A33A85521ABDF1CBA64ECFB850458DBEF0A8AEA71575D060C7D"
+ "B3970F85A6E1E4C7ABF5AE8CDB0933D71E8C94E04A25619DCEE3D226"
+ "1AD2EE6BF12FFA06D98A0864D87602733EC86A64521F2B18177B200C"
+ "BBE117577A615D6C770988C0BAD946E208E24FA074E5AB3143DB5BFC"
+ "E0FD108E4B82D120A92108011A723C12A787E6D788719A10BDBA5B26"
+ "99C327186AF4E23C1A946834B6150BDA2583E9CA2AD44CE8DBBBC2DB"
+ "04DE8EF92E8EFC141FBECAA6287C59474E6BC05D99B2964FA090C3A2"
+ "233BA186515BE7ED1F612970CEE2D7AFB81BDD762170481CD0069127"
+ "D5B05AA993B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492"
+ "36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BDF8FF9406"
+ "AD9E530EE5DB382F413001AEB06A53ED9027D831179727B0865A8918"
+ "DA3EDBEBCF9B14ED44CE6CBACED4BB1BDB7F1447E6CC254B33205151"
+ "2BD7AF426FB8F401378CD2BF5983CA01C64B92ECF032EA15D1721D03"
+ "F482D7CE6E74FEF6D55E702F46980C82B5A84031900B1C9E59E7C97F"
+ "BEC7E8F323A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA"
+ "CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE32806A1D58B"
+ "B7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55CDA56C9EC2EF29632"
+ "387FE8D76E3C0468043E8F663F4860EE12BF2D5B0B7474D6E694F91E"
+ "6DCC4024FFFFFFFFFFFFFFFF",
base=16,
),
"generator": 2,
},
# 8192-bit
18: {
"prime": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"
+ "29024E088A67CC74020BBEA63B139B22514A08798E3404DD"
+ "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"
+ "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"
+ "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"
+ "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"
+ "83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B"
+ "E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9"
+ "DE2BCBF6955817183995497CEA956AE515D2261898FA0510"
+ "15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64"
+ "ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7"
+ "ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B"
+ "F12FFA06D98A0864D87602733EC86A64521F2B18177B200C"
+ "BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31"
+ "43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7"
+ "88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA"
+ "2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6"
+ "287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED"
+ "1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9"
+ "93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492"
+ "36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BD"
+ "F8FF9406AD9E530EE5DB382F413001AEB06A53ED9027D831"
+ "179727B0865A8918DA3EDBEBCF9B14ED44CE6CBACED4BB1B"
+ "DB7F1447E6CC254B332051512BD7AF426FB8F401378CD2BF"
+ "5983CA01C64B92ECF032EA15D1721D03F482D7CE6E74FEF6"
+ "D55E702F46980C82B5A84031900B1C9E59E7C97FBEC7E8F3"
+ "23A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA"
+ "CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE328"
+ "06A1D58BB7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55C"
+ "DA56C9EC2EF29632387FE8D76E3C0468043E8F663F4860EE"
+ "12BF2D5B0B7474D6E694F91E6DBE115974A3926F12FEE5E4"
+ "38777CB6A932DF8CD8BEC4D073B931BA3BC832B68D9DD300"
+ "741FA7BF8AFC47ED2576F6936BA424663AAB639C5AE4F568"
+ "3423B4742BF1C978238F16CBE39D652DE3FDB8BEFC848AD9"
+ "22222E04A4037C0713EB57A81A23F0C73473FC646CEA306B"
+ "4BCBC8862F8385DDFA9D4B7FA2C087E879683303ED5BDD3A"
+ "062B3CF5B3A278A66D2A13F83F44F82DDF310EE074AB6A36"
+ "4597E899A0255DC164F31CC50846851DF9AB48195DED7EA1"
+ "B1D510BD7EE74D73FAF36BC31ECFA268359046F4EB879F92"
+ "4009438B481C6CD7889A002ED5EE382BC9190DA6FC026E47"
+ "9558E4475677E9AA9E3050E2765694DFC81F56E880B96E71"
+ "60C980DD98EDD3DFFFFFFFFFFFFFFFFF",
base=16,
),
"generator": 2,
},
}
class lowerCamelCase :
def __init__(self : Any , _A : int = 1_4 ) -> None:
if group not in primes:
raise ValueError("Unsupported Group" )
snake_case = primes[group]["prime"]
snake_case = primes[group]["generator"]
snake_case = int(hexlify(urandom(3_2 ) ) , base=1_6 )
def UpperCAmelCase(self : Any ) -> str:
return hex(self.__private_key )[2:]
def UpperCAmelCase(self : Tuple ) -> str:
snake_case = pow(self.generator , self.__private_key , self.prime )
return hex(_A )[2:]
def UpperCAmelCase(self : Optional[int] , _A : int ) -> bool:
# check if the other public key is valid based on NIST SP800-56
return (
2 <= key <= self.prime - 2
and pow(_A , (self.prime - 1) // 2 , self.prime ) == 1
)
def UpperCAmelCase(self : List[Any] , _A : str ) -> str:
snake_case = int(_A , base=1_6 )
if not self.is_valid_public_key(_A ):
raise ValueError("Invalid public key" )
snake_case = pow(_A , self.__private_key , self.prime )
return shaaaa(str(_A ).encode() ).hexdigest()
@staticmethod
def UpperCAmelCase(_A : int , _A : int ) -> bool:
# check if the other public key is valid based on NIST SP800-56
return (
2 <= remote_public_key_str <= prime - 2
and pow(_A , (prime - 1) // 2 , _A ) == 1
)
@staticmethod
def UpperCAmelCase(_A : str , _A : str , _A : int = 1_4 ) -> str:
snake_case = int(_A , base=1_6 )
snake_case = int(_A , base=1_6 )
snake_case = primes[group]["prime"]
if not DiffieHellman.is_valid_public_key_static(_A , _A ):
raise ValueError("Invalid public key" )
snake_case = pow(_A , _A , _A )
return shaaaa(str(_A ).encode() ).hexdigest()
if __name__ == "__main__":
import doctest
doctest.testmod()
| 294
| 1
|
import logging
import os
import sys
import warnings
from dataclasses import dataclass, field
from random import randint
from typing import Optional
import datasets
import evaluate
import numpy as np
from datasets import DatasetDict, load_dataset
import transformers
from transformers import (
AutoConfig,
AutoFeatureExtractor,
AutoModelForAudioClassification,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
__SCREAMING_SNAKE_CASE =logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("""4.31.0""")
require_version("""datasets>=1.14.0""", """To fix: pip install -r examples/pytorch/audio-classification/requirements.txt""")
def a (_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = 1_6_0_0_0 ):
SCREAMING_SNAKE_CASE_ = int(round(sample_rate * max_length ) )
if len(_lowerCAmelCase ) <= sample_length:
return wav
SCREAMING_SNAKE_CASE_ = randint(0 , len(_lowerCAmelCase ) - sample_length - 1 )
return wav[random_offset : random_offset + sample_length]
@dataclass
class __magic_name__ :
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Optional[str] = field(default=__UpperCAmelCase , metadata={"help": "Name of a dataset from the datasets package"})
SCREAMING_SNAKE_CASE__ : Optional[str] = field(
default=__UpperCAmelCase , metadata={"help": "The configuration name of the dataset to use (via the datasets library)."})
SCREAMING_SNAKE_CASE__ : Optional[str] = field(
default=__UpperCAmelCase , metadata={"help": "A file containing the training audio paths and labels."})
SCREAMING_SNAKE_CASE__ : Optional[str] = field(
default=__UpperCAmelCase , metadata={"help": "A file containing the validation audio paths and labels."})
SCREAMING_SNAKE_CASE__ : str = field(
default="train" , metadata={
"help": "The name of the training data set split to use (via the datasets library). Defaults to 'train'"
} , )
SCREAMING_SNAKE_CASE__ : str = field(
default="validation" , metadata={
"help": (
"The name of the training data set split to use (via the datasets library). Defaults to 'validation'"
)
} , )
SCREAMING_SNAKE_CASE__ : str = field(
default="audio" , metadata={"help": "The name of the dataset column containing the audio data. Defaults to 'audio'"} , )
SCREAMING_SNAKE_CASE__ : str = field(
default="label" , metadata={"help": "The name of the dataset column containing the labels. Defaults to 'label'"})
SCREAMING_SNAKE_CASE__ : Optional[int] = field(
default=__UpperCAmelCase , metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of training examples to this "
"value if set."
)
} , )
SCREAMING_SNAKE_CASE__ : Optional[int] = field(
default=__UpperCAmelCase , metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of evaluation examples to this "
"value if set."
)
} , )
SCREAMING_SNAKE_CASE__ : float = field(
default=20 , metadata={"help": "Audio clips will be randomly cut to this length during training if the value is set."} , )
@dataclass
class __magic_name__ :
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : str = field(
default="facebook/wav2vec2-base" , metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} , )
SCREAMING_SNAKE_CASE__ : Optional[str] = field(
default=__UpperCAmelCase , metadata={"help": "Pretrained config name or path if not the same as model_name"})
SCREAMING_SNAKE_CASE__ : Optional[str] = field(
default=__UpperCAmelCase , metadata={"help": "Where do you want to store the pretrained models downloaded from the Hub"})
SCREAMING_SNAKE_CASE__ : str = field(
default="main" , metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."} , )
SCREAMING_SNAKE_CASE__ : Optional[str] = field(
default=__UpperCAmelCase , metadata={"help": "Name or path of preprocessor config."})
SCREAMING_SNAKE_CASE__ : bool = field(
default=__UpperCAmelCase , metadata={"help": "Whether to freeze the feature encoder layers of the model."})
SCREAMING_SNAKE_CASE__ : bool = field(
default=__UpperCAmelCase , metadata={"help": "Whether to generate an attention mask in the feature extractor."})
SCREAMING_SNAKE_CASE__ : bool = field(
default=__UpperCAmelCase , metadata={
"help": (
"Will use the token generated when running `huggingface-cli login` (necessary to use this script "
"with private models)."
)
} , )
SCREAMING_SNAKE_CASE__ : Optional[bool] = field(
default=__UpperCAmelCase , metadata={"help": "Whether to freeze the feature extractor layers of the model."})
SCREAMING_SNAKE_CASE__ : bool = field(
default=__UpperCAmelCase , metadata={"help": "Will enable to load a pretrained model whose head dimensions are different."} , )
def _A ( self: Union[str, Any] ):
if not self.freeze_feature_extractor and self.freeze_feature_encoder:
warnings.warn(
'''The argument `--freeze_feature_extractor` is deprecated and '''
'''will be removed in a future version. Use `--freeze_feature_encoder`'''
'''instead. Setting `freeze_feature_encoder==True`.''' , _lowerCamelCase , )
if self.freeze_feature_extractor and not self.freeze_feature_encoder:
raise ValueError(
'''The argument `--freeze_feature_extractor` is deprecated and '''
'''should not be used in combination with `--freeze_feature_encoder`.'''
'''Only make use of `--freeze_feature_encoder`.''' )
def a ():
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
SCREAMING_SNAKE_CASE_ = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('''.json''' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry('''run_audio_classification''' , _lowerCAmelCase , _lowerCAmelCase )
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
SCREAMING_SNAKE_CASE_ = training_args.get_process_log_level()
logger.setLevel(_lowerCAmelCase )
transformers.utils.logging.set_verbosity(_lowerCAmelCase )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
F"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu} "
+ F"distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}" )
logger.info(F"Training/evaluation parameters {training_args}" )
# Set seed before initializing model.
set_seed(training_args.seed )
# Detecting last checkpoint.
SCREAMING_SNAKE_CASE_ = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
SCREAMING_SNAKE_CASE_ = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F"Output directory ({training_args.output_dir}) already exists and is not empty. "
'''Use --overwrite_output_dir to train from scratch.''' )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
'''the `--output_dir` or add `--overwrite_output_dir` to train from scratch.''' )
# Initialize our dataset and prepare it for the audio classification task.
SCREAMING_SNAKE_CASE_ = DatasetDict()
SCREAMING_SNAKE_CASE_ = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=data_args.train_split_name , use_auth_token=True if model_args.use_auth_token else None , )
SCREAMING_SNAKE_CASE_ = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=data_args.eval_split_name , use_auth_token=True if model_args.use_auth_token else None , )
if data_args.audio_column_name not in raw_datasets["train"].column_names:
raise ValueError(
F"--audio_column_name {data_args.audio_column_name} not found in dataset '{data_args.dataset_name}'. "
'''Make sure to set `--audio_column_name` to the correct audio column - one of '''
F"{', '.join(raw_datasets['train'].column_names )}." )
if data_args.label_column_name not in raw_datasets["train"].column_names:
raise ValueError(
F"--label_column_name {data_args.label_column_name} not found in dataset '{data_args.dataset_name}'. "
'''Make sure to set `--label_column_name` to the correct text column - one of '''
F"{', '.join(raw_datasets['train'].column_names )}." )
# Setting `return_attention_mask=True` is the way to get a correctly masked mean-pooling over
# transformer outputs in the classifier, but it doesn't always lead to better accuracy
SCREAMING_SNAKE_CASE_ = AutoFeatureExtractor.from_pretrained(
model_args.feature_extractor_name or model_args.model_name_or_path , return_attention_mask=model_args.attention_mask , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# `datasets` takes care of automatically loading and resampling the audio,
# so we just need to set the correct target sampling rate.
SCREAMING_SNAKE_CASE_ = raw_datasets.cast_column(
data_args.audio_column_name , datasets.features.Audio(sampling_rate=feature_extractor.sampling_rate ) )
SCREAMING_SNAKE_CASE_ = feature_extractor.model_input_names[0]
def train_transforms(_lowerCAmelCase ):
SCREAMING_SNAKE_CASE_ = []
for audio in batch[data_args.audio_column_name]:
SCREAMING_SNAKE_CASE_ = random_subsample(
audio['''array'''] , max_length=data_args.max_length_seconds , sample_rate=feature_extractor.sampling_rate )
subsampled_wavs.append(_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = feature_extractor(_lowerCAmelCase , sampling_rate=feature_extractor.sampling_rate )
SCREAMING_SNAKE_CASE_ = {model_input_name: inputs.get(_lowerCAmelCase )}
SCREAMING_SNAKE_CASE_ = list(batch[data_args.label_column_name] )
return output_batch
def val_transforms(_lowerCAmelCase ):
SCREAMING_SNAKE_CASE_ = [audio['''array'''] for audio in batch[data_args.audio_column_name]]
SCREAMING_SNAKE_CASE_ = feature_extractor(_lowerCAmelCase , sampling_rate=feature_extractor.sampling_rate )
SCREAMING_SNAKE_CASE_ = {model_input_name: inputs.get(_lowerCAmelCase )}
SCREAMING_SNAKE_CASE_ = list(batch[data_args.label_column_name] )
return output_batch
# Prepare label mappings.
# We'll include these in the model's config to get human readable labels in the Inference API.
SCREAMING_SNAKE_CASE_ = raw_datasets['''train'''].features[data_args.label_column_name].names
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = {}, {}
for i, label in enumerate(_lowerCAmelCase ):
SCREAMING_SNAKE_CASE_ = str(_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = label
# Load the accuracy metric from the datasets package
SCREAMING_SNAKE_CASE_ = evaluate.load('''accuracy''' )
# Define our compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with
# `predictions` and `label_ids` fields) and has to return a dictionary string to float.
def compute_metrics(_lowerCAmelCase ):
SCREAMING_SNAKE_CASE_ = np.argmax(eval_pred.predictions , axis=1 )
return metric.compute(predictions=_lowerCAmelCase , references=eval_pred.label_ids )
SCREAMING_SNAKE_CASE_ = AutoConfig.from_pretrained(
model_args.config_name or model_args.model_name_or_path , num_labels=len(_lowerCAmelCase ) , labelaid=_lowerCAmelCase , idalabel=_lowerCAmelCase , finetuning_task='''audio-classification''' , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
SCREAMING_SNAKE_CASE_ = AutoModelForAudioClassification.from_pretrained(
model_args.model_name_or_path , from_tf=bool('''.ckpt''' in model_args.model_name_or_path ) , config=_lowerCAmelCase , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ignore_mismatched_sizes=model_args.ignore_mismatched_sizes , )
# freeze the convolutional waveform encoder
if model_args.freeze_feature_encoder:
model.freeze_feature_encoder()
if training_args.do_train:
if data_args.max_train_samples is not None:
SCREAMING_SNAKE_CASE_ = (
raw_datasets['''train'''].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) )
)
# Set the training transforms
raw_datasets["train"].set_transform(_lowerCAmelCase , output_all_columns=_lowerCAmelCase )
if training_args.do_eval:
if data_args.max_eval_samples is not None:
SCREAMING_SNAKE_CASE_ = (
raw_datasets['''eval'''].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) )
)
# Set the validation transforms
raw_datasets["eval"].set_transform(_lowerCAmelCase , output_all_columns=_lowerCAmelCase )
# Initialize our trainer
SCREAMING_SNAKE_CASE_ = Trainer(
model=_lowerCAmelCase , args=_lowerCAmelCase , train_dataset=raw_datasets['''train'''] if training_args.do_train else None , eval_dataset=raw_datasets['''eval'''] if training_args.do_eval else None , compute_metrics=_lowerCAmelCase , tokenizer=_lowerCAmelCase , )
# Training
if training_args.do_train:
SCREAMING_SNAKE_CASE_ = None
if training_args.resume_from_checkpoint is not None:
SCREAMING_SNAKE_CASE_ = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
SCREAMING_SNAKE_CASE_ = last_checkpoint
SCREAMING_SNAKE_CASE_ = trainer.train(resume_from_checkpoint=_lowerCAmelCase )
trainer.save_model()
trainer.log_metrics('''train''' , train_result.metrics )
trainer.save_metrics('''train''' , train_result.metrics )
trainer.save_state()
# Evaluation
if training_args.do_eval:
SCREAMING_SNAKE_CASE_ = trainer.evaluate()
trainer.log_metrics('''eval''' , _lowerCAmelCase )
trainer.save_metrics('''eval''' , _lowerCAmelCase )
# Write model card and (optionally) push to hub
SCREAMING_SNAKE_CASE_ = {
'''finetuned_from''': model_args.model_name_or_path,
'''tasks''': '''audio-classification''',
'''dataset''': data_args.dataset_name,
'''tags''': ['''audio-classification'''],
}
if training_args.push_to_hub:
trainer.push_to_hub(**_lowerCAmelCase )
else:
trainer.create_model_card(**_lowerCAmelCase )
if __name__ == "__main__":
main()
| 234
|
from __future__ import annotations
def a (_lowerCAmelCase ):
SCREAMING_SNAKE_CASE_ = len(_lowerCAmelCase ) // 2
# choose the middle 3 elements
SCREAMING_SNAKE_CASE_ = lst[m - 1 : m + 2]
# if middle element is peak
if three[1] > three[0] and three[1] > three[2]:
return three[1]
# if increasing, recurse on right
elif three[0] < three[2]:
if len(lst[:m] ) == 2:
m -= 1
return peak(lst[m:] )
# decreasing
else:
if len(lst[:m] ) == 2:
m += 1
return peak(lst[:m] )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 234
| 1
|
"""simple docstring"""
def lowercase_ ( _lowerCamelCase: int = 1 , _lowerCamelCase: int = 1000 ) -> Optional[int]:
'''simple docstring'''
__lowerCamelCase : Optional[Any] = 1
__lowerCamelCase : Dict = 0
for divide_by_number in range(_lowerCAmelCase , digit + 1 ):
__lowerCamelCase : list[int] = []
__lowerCamelCase : Tuple = numerator
for _ in range(1 , digit + 1 ):
if now_divide in has_been_divided:
if longest_list_length < len(_lowerCAmelCase ):
__lowerCamelCase : Any = len(_lowerCAmelCase )
__lowerCamelCase : Dict = divide_by_number
else:
has_been_divided.append(_lowerCAmelCase )
__lowerCamelCase : int = now_divide * 10 % divide_by_number
return the_digit
# Tests
if __name__ == "__main__":
import doctest
doctest.testmod()
| 711
|
"""simple docstring"""
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_torch_available, is_vision_available
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MgpstrProcessor, ViTImageProcessor
@require_torch
@require_vision
class _snake_case ( unittest.TestCase ):
snake_case__ = ViTImageProcessor if is_vision_available() else None
@property
def lowerCamelCase__ ( self : List[Any] ):
return self.image_processor_tester.prepare_image_processor_dict()
def lowerCamelCase__ ( self : Tuple ):
__lowerCamelCase : str = (3, 32, 128)
__lowerCamelCase : Optional[int] = tempfile.mkdtemp()
# fmt: off
__lowerCamelCase : Tuple = ["[GO]", "[s]", "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z"]
# fmt: on
__lowerCamelCase : Dict = dict(zip(UpperCAmelCase , range(len(UpperCAmelCase ) ) ) )
__lowerCamelCase : List[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
fp.write(json.dumps(UpperCAmelCase ) + "\n" )
__lowerCamelCase : int = {
"do_normalize": False,
"do_resize": True,
"image_processor_type": "ViTImageProcessor",
"resample": 3,
"size": {"height": 32, "width": 128},
}
__lowerCamelCase : Dict = os.path.join(self.tmpdirname , UpperCAmelCase )
with open(self.image_processor_file , "w" , encoding="utf-8" ) as fp:
json.dump(UpperCAmelCase , UpperCAmelCase )
def lowerCamelCase__ ( self : Tuple , **UpperCAmelCase : Optional[int] ):
return MgpstrTokenizer.from_pretrained(self.tmpdirname , **UpperCAmelCase )
def lowerCamelCase__ ( self : List[str] , **UpperCAmelCase : List[str] ):
return ViTImageProcessor.from_pretrained(self.tmpdirname , **UpperCAmelCase )
def lowerCamelCase__ ( self : Tuple ):
shutil.rmtree(self.tmpdirname )
def lowerCamelCase__ ( self : str ):
__lowerCamelCase : Any = np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )
__lowerCamelCase : Union[str, Any] = Image.fromarray(np.moveaxis(UpperCAmelCase , 0 , -1 ) )
return image_input
def lowerCamelCase__ ( self : List[str] ):
__lowerCamelCase : Union[str, Any] = self.get_tokenizer()
__lowerCamelCase : Any = self.get_image_processor()
__lowerCamelCase : List[Any] = MgpstrProcessor(tokenizer=UpperCAmelCase , image_processor=UpperCAmelCase )
processor.save_pretrained(self.tmpdirname )
__lowerCamelCase : str = MgpstrProcessor.from_pretrained(self.tmpdirname , use_fast=UpperCAmelCase )
self.assertEqual(processor.char_tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.char_tokenizer , UpperCAmelCase )
self.assertEqual(processor.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor.image_processor , UpperCAmelCase )
def lowerCamelCase__ ( self : Optional[int] ):
__lowerCamelCase : Any = self.get_tokenizer()
__lowerCamelCase : Dict = self.get_image_processor()
__lowerCamelCase : Optional[int] = MgpstrProcessor(tokenizer=UpperCAmelCase , image_processor=UpperCAmelCase )
processor.save_pretrained(self.tmpdirname )
__lowerCamelCase : Dict = self.get_tokenizer(bos_token="(BOS)" , eos_token="(EOS)" )
__lowerCamelCase : Any = self.get_image_processor(do_normalize=UpperCAmelCase , padding_value=1.0 )
__lowerCamelCase : List[str] = MgpstrProcessor.from_pretrained(
self.tmpdirname , bos_token="(BOS)" , eos_token="(EOS)" , do_normalize=UpperCAmelCase , padding_value=1.0 )
self.assertEqual(processor.char_tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.char_tokenizer , UpperCAmelCase )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , UpperCAmelCase )
def lowerCamelCase__ ( self : Optional[Any] ):
__lowerCamelCase : List[str] = self.get_image_processor()
__lowerCamelCase : Dict = self.get_tokenizer()
__lowerCamelCase : Optional[int] = MgpstrProcessor(tokenizer=UpperCAmelCase , image_processor=UpperCAmelCase )
__lowerCamelCase : Optional[int] = self.prepare_image_inputs()
__lowerCamelCase : Optional[int] = image_processor(UpperCAmelCase , return_tensors="np" )
__lowerCamelCase : Tuple = processor(images=UpperCAmelCase , return_tensors="np" )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1E-2 )
def lowerCamelCase__ ( self : Union[str, Any] ):
__lowerCamelCase : int = self.get_image_processor()
__lowerCamelCase : Any = self.get_tokenizer()
__lowerCamelCase : int = MgpstrProcessor(tokenizer=UpperCAmelCase , image_processor=UpperCAmelCase )
__lowerCamelCase : Optional[Any] = "test"
__lowerCamelCase : List[Any] = processor(text=UpperCAmelCase )
__lowerCamelCase : Optional[int] = tokenizer(UpperCAmelCase )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def lowerCamelCase__ ( self : Union[str, Any] ):
__lowerCamelCase : Union[str, Any] = self.get_image_processor()
__lowerCamelCase : Optional[Any] = self.get_tokenizer()
__lowerCamelCase : Optional[int] = MgpstrProcessor(tokenizer=UpperCAmelCase , image_processor=UpperCAmelCase )
__lowerCamelCase : str = "test"
__lowerCamelCase : Union[str, Any] = self.prepare_image_inputs()
__lowerCamelCase : Optional[Any] = processor(text=UpperCAmelCase , images=UpperCAmelCase )
self.assertListEqual(list(inputs.keys() ) , ["pixel_values", "labels"] )
# test if it raises when no input is passed
with pytest.raises(UpperCAmelCase ):
processor()
def lowerCamelCase__ ( self : Optional[Any] ):
__lowerCamelCase : Any = self.get_image_processor()
__lowerCamelCase : Optional[Any] = self.get_tokenizer()
__lowerCamelCase : Dict = MgpstrProcessor(tokenizer=UpperCAmelCase , image_processor=UpperCAmelCase )
__lowerCamelCase : Optional[Any] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9], [3, 4, 3, 1, 1, 8, 9]]
__lowerCamelCase : List[Any] = processor.char_decode(UpperCAmelCase )
__lowerCamelCase : Dict = tokenizer.batch_decode(UpperCAmelCase )
__lowerCamelCase : Any = [seq.replace(" " , "" ) for seq in decoded_tok]
self.assertListEqual(UpperCAmelCase , UpperCAmelCase )
def lowerCamelCase__ ( self : Any ):
__lowerCamelCase : str = self.get_image_processor()
__lowerCamelCase : str = self.get_tokenizer()
__lowerCamelCase : Union[str, Any] = MgpstrProcessor(tokenizer=UpperCAmelCase , image_processor=UpperCAmelCase )
__lowerCamelCase : Tuple = None
__lowerCamelCase : int = self.prepare_image_inputs()
__lowerCamelCase : List[str] = processor(text=UpperCAmelCase , images=UpperCAmelCase )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
def lowerCamelCase__ ( self : Optional[Any] ):
__lowerCamelCase : Optional[Any] = self.get_image_processor()
__lowerCamelCase : Tuple = self.get_tokenizer()
__lowerCamelCase : List[Any] = MgpstrProcessor(tokenizer=UpperCAmelCase , image_processor=UpperCAmelCase )
__lowerCamelCase : Optional[int] = torch.randn(1 , 27 , 38 )
__lowerCamelCase : List[str] = torch.randn(1 , 27 , 50257 )
__lowerCamelCase : Union[str, Any] = torch.randn(1 , 27 , 30522 )
__lowerCamelCase : str = processor.batch_decode([char_input, bpe_input, wp_input] )
self.assertListEqual(list(results.keys() ) , ["generated_text", "scores", "char_preds", "bpe_preds", "wp_preds"] )
| 366
| 0
|
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_url
from PIL import Image
from transformers import DPTConfig, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
UpperCamelCase : Tuple = logging.get_logger(__name__)
def A__ ( __lowerCAmelCase : int ):
lowerCamelCase__ = DPTConfig(embedding_type="""hybrid""" )
if "large" in checkpoint_url:
lowerCamelCase__ = 1024
lowerCamelCase__ = 4096
lowerCamelCase__ = 24
lowerCamelCase__ = 16
lowerCamelCase__ = [5, 11, 17, 23]
lowerCamelCase__ = [256, 512, 1024, 1024]
lowerCamelCase__ = (1, 384, 384)
if "nyu" or "midas" in checkpoint_url:
lowerCamelCase__ = 768
lowerCamelCase__ = [1, 1, 1, 0.5]
lowerCamelCase__ = [256, 512, 768, 768]
lowerCamelCase__ = 150
lowerCamelCase__ = 16
lowerCamelCase__ = (1, 384, 384)
lowerCamelCase__ = False
lowerCamelCase__ = """project"""
if "ade" in checkpoint_url:
lowerCamelCase__ = True
lowerCamelCase__ = 768
lowerCamelCase__ = [1, 1, 1, 0.5]
lowerCamelCase__ = 150
lowerCamelCase__ = 16
lowerCamelCase__ = """huggingface/label-files"""
lowerCamelCase__ = """ade20k-id2label.json"""
lowerCamelCase__ = json.load(open(cached_download(hf_hub_url(__lowerCAmelCase , __lowerCAmelCase , repo_type="""dataset""" ) ) , """r""" ) )
lowerCamelCase__ = {int(__lowerCAmelCase ): v for k, v in idalabel.items()}
lowerCamelCase__ = idalabel
lowerCamelCase__ = {v: k for k, v in idalabel.items()}
lowerCamelCase__ = [1, 150, 480, 480]
return config, expected_shape
def A__ ( __lowerCAmelCase : Optional[int] ):
lowerCamelCase__ = ["""pretrained.model.head.weight""", """pretrained.model.head.bias"""]
for k in ignore_keys:
state_dict.pop(__lowerCAmelCase , __lowerCAmelCase )
def A__ ( __lowerCAmelCase : List[Any] ):
if (
"pretrained.model" in name
and "cls_token" not in name
and "pos_embed" not in name
and "patch_embed" not in name
):
lowerCamelCase__ = name.replace("""pretrained.model""" , """dpt.encoder""" )
if "pretrained.model" in name:
lowerCamelCase__ = name.replace("""pretrained.model""" , """dpt.embeddings""" )
if "patch_embed" in name:
lowerCamelCase__ = name.replace("""patch_embed""" , """""" )
if "pos_embed" in name:
lowerCamelCase__ = name.replace("""pos_embed""" , """position_embeddings""" )
if "attn.proj" in name:
lowerCamelCase__ = name.replace("""attn.proj""" , """attention.output.dense""" )
if "proj" in name and "project" not in name:
lowerCamelCase__ = name.replace("""proj""" , """projection""" )
if "blocks" in name:
lowerCamelCase__ = name.replace("""blocks""" , """layer""" )
if "mlp.fc1" in name:
lowerCamelCase__ = name.replace("""mlp.fc1""" , """intermediate.dense""" )
if "mlp.fc2" in name:
lowerCamelCase__ = name.replace("""mlp.fc2""" , """output.dense""" )
if "norm1" in name and "backbone" not in name:
lowerCamelCase__ = name.replace("""norm1""" , """layernorm_before""" )
if "norm2" in name and "backbone" not in name:
lowerCamelCase__ = name.replace("""norm2""" , """layernorm_after""" )
if "scratch.output_conv" in name:
lowerCamelCase__ = name.replace("""scratch.output_conv""" , """head""" )
if "scratch" in name:
lowerCamelCase__ = name.replace("""scratch""" , """neck""" )
if "layer1_rn" in name:
lowerCamelCase__ = name.replace("""layer1_rn""" , """convs.0""" )
if "layer2_rn" in name:
lowerCamelCase__ = name.replace("""layer2_rn""" , """convs.1""" )
if "layer3_rn" in name:
lowerCamelCase__ = name.replace("""layer3_rn""" , """convs.2""" )
if "layer4_rn" in name:
lowerCamelCase__ = name.replace("""layer4_rn""" , """convs.3""" )
if "refinenet" in name:
lowerCamelCase__ = int(name[len("""neck.refinenet""" ) : len("""neck.refinenet""" ) + 1] )
# tricky here: we need to map 4 to 0, 3 to 1, 2 to 2 and 1 to 3
lowerCamelCase__ = name.replace(F'''refinenet{layer_idx}''' , F'''fusion_stage.layers.{abs(layer_idx-4 )}''' )
if "out_conv" in name:
lowerCamelCase__ = name.replace("""out_conv""" , """projection""" )
if "resConfUnit1" in name:
lowerCamelCase__ = name.replace("""resConfUnit1""" , """residual_layer1""" )
if "resConfUnit2" in name:
lowerCamelCase__ = name.replace("""resConfUnit2""" , """residual_layer2""" )
if "conv1" in name:
lowerCamelCase__ = name.replace("""conv1""" , """convolution1""" )
if "conv2" in name:
lowerCamelCase__ = name.replace("""conv2""" , """convolution2""" )
# readout blocks
if "pretrained.act_postprocess1.0.project.0" in name:
lowerCamelCase__ = name.replace("""pretrained.act_postprocess1.0.project.0""" , """neck.reassemble_stage.readout_projects.0.0""" )
if "pretrained.act_postprocess2.0.project.0" in name:
lowerCamelCase__ = name.replace("""pretrained.act_postprocess2.0.project.0""" , """neck.reassemble_stage.readout_projects.1.0""" )
if "pretrained.act_postprocess3.0.project.0" in name:
lowerCamelCase__ = name.replace("""pretrained.act_postprocess3.0.project.0""" , """neck.reassemble_stage.readout_projects.2.0""" )
if "pretrained.act_postprocess4.0.project.0" in name:
lowerCamelCase__ = name.replace("""pretrained.act_postprocess4.0.project.0""" , """neck.reassemble_stage.readout_projects.3.0""" )
# resize blocks
if "pretrained.act_postprocess1.3" in name:
lowerCamelCase__ = name.replace("""pretrained.act_postprocess1.3""" , """neck.reassemble_stage.layers.0.projection""" )
if "pretrained.act_postprocess1.4" in name:
lowerCamelCase__ = name.replace("""pretrained.act_postprocess1.4""" , """neck.reassemble_stage.layers.0.resize""" )
if "pretrained.act_postprocess2.3" in name:
lowerCamelCase__ = name.replace("""pretrained.act_postprocess2.3""" , """neck.reassemble_stage.layers.1.projection""" )
if "pretrained.act_postprocess2.4" in name:
lowerCamelCase__ = name.replace("""pretrained.act_postprocess2.4""" , """neck.reassemble_stage.layers.1.resize""" )
if "pretrained.act_postprocess3.3" in name:
lowerCamelCase__ = name.replace("""pretrained.act_postprocess3.3""" , """neck.reassemble_stage.layers.2.projection""" )
if "pretrained.act_postprocess4.3" in name:
lowerCamelCase__ = name.replace("""pretrained.act_postprocess4.3""" , """neck.reassemble_stage.layers.3.projection""" )
if "pretrained.act_postprocess4.4" in name:
lowerCamelCase__ = name.replace("""pretrained.act_postprocess4.4""" , """neck.reassemble_stage.layers.3.resize""" )
if "pretrained" in name:
lowerCamelCase__ = name.replace("""pretrained""" , """dpt""" )
if "bn" in name:
lowerCamelCase__ = name.replace("""bn""" , """batch_norm""" )
if "head" in name:
lowerCamelCase__ = name.replace("""head""" , """head.head""" )
if "encoder.norm" in name:
lowerCamelCase__ = name.replace("""encoder.norm""" , """layernorm""" )
if "auxlayer" in name:
lowerCamelCase__ = name.replace("""auxlayer""" , """auxiliary_head.head""" )
if "backbone" in name:
lowerCamelCase__ = name.replace("""backbone""" , """backbone.bit.encoder""" )
if ".." in name:
lowerCamelCase__ = name.replace("""..""" , """.""" )
if "stem.conv" in name:
lowerCamelCase__ = name.replace("""stem.conv""" , """bit.embedder.convolution""" )
if "blocks" in name:
lowerCamelCase__ = name.replace("""blocks""" , """layers""" )
if "convolution" in name and "backbone" in name:
lowerCamelCase__ = name.replace("""convolution""" , """conv""" )
if "layer" in name and "backbone" in name:
lowerCamelCase__ = name.replace("""layer""" , """layers""" )
if "backbone.bit.encoder.bit" in name:
lowerCamelCase__ = name.replace("""backbone.bit.encoder.bit""" , """backbone.bit""" )
if "embedder.conv" in name:
lowerCamelCase__ = name.replace("""embedder.conv""" , """embedder.convolution""" )
if "backbone.bit.encoder.stem.norm" in name:
lowerCamelCase__ = name.replace("""backbone.bit.encoder.stem.norm""" , """backbone.bit.embedder.norm""" )
return name
def A__ ( __lowerCAmelCase : str , __lowerCAmelCase : int ):
for i in range(config.num_hidden_layers ):
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
lowerCamelCase__ = state_dict.pop(F'''dpt.encoder.layer.{i}.attn.qkv.weight''' )
lowerCamelCase__ = state_dict.pop(F'''dpt.encoder.layer.{i}.attn.qkv.bias''' )
# next, add query, keys and values (in that order) to the state dict
lowerCamelCase__ = in_proj_weight[: config.hidden_size, :]
lowerCamelCase__ = in_proj_bias[: config.hidden_size]
lowerCamelCase__ = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
lowerCamelCase__ = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
lowerCamelCase__ = in_proj_weight[
-config.hidden_size :, :
]
lowerCamelCase__ = in_proj_bias[-config.hidden_size :]
def A__ ( ):
lowerCamelCase__ = """http://images.cocodataset.org/val2017/000000039769.jpg"""
lowerCamelCase__ = Image.open(requests.get(__lowerCAmelCase , stream=__lowerCAmelCase ).raw )
return im
@torch.no_grad()
def A__ ( __lowerCAmelCase : Dict , __lowerCAmelCase : Tuple , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Dict , __lowerCAmelCase : Any ):
lowerCamelCase__ , lowerCamelCase__ = get_dpt_config(__lowerCAmelCase )
# load original state_dict from URL
# state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")
lowerCamelCase__ = torch.load(__lowerCAmelCase , map_location="""cpu""" )
# remove certain keys
remove_ignore_keys_(__lowerCAmelCase )
# rename keys
for key in state_dict.copy().keys():
lowerCamelCase__ = state_dict.pop(__lowerCAmelCase )
lowerCamelCase__ = val
# read in qkv matrices
read_in_q_k_v(__lowerCAmelCase , __lowerCAmelCase )
# load HuggingFace model
lowerCamelCase__ = DPTForSemanticSegmentation(__lowerCAmelCase ) if """ade""" in checkpoint_url else DPTForDepthEstimation(__lowerCAmelCase )
model.load_state_dict(__lowerCAmelCase )
model.eval()
# Check outputs on an image
lowerCamelCase__ = 480 if """ade""" in checkpoint_url else 384
lowerCamelCase__ = DPTImageProcessor(size=__lowerCAmelCase )
lowerCamelCase__ = prepare_img()
lowerCamelCase__ = image_processor(__lowerCAmelCase , return_tensors="""pt""" )
# forward pass
lowerCamelCase__ = model(**__lowerCAmelCase ).logits if """ade""" in checkpoint_url else model(**__lowerCAmelCase ).predicted_depth
if show_prediction:
lowerCamelCase__ = (
torch.nn.functional.interpolate(
outputs.unsqueeze(1 ) , size=(image.size[1], image.size[0]) , mode="""bicubic""" , align_corners=__lowerCAmelCase , )
.squeeze()
.cpu()
.numpy()
)
Image.fromarray((prediction / prediction.max()) * 255 ).show()
if pytorch_dump_folder_path is not None:
Path(__lowerCAmelCase ).mkdir(exist_ok=__lowerCAmelCase )
print(F'''Saving model to {pytorch_dump_folder_path}''' )
model.save_pretrained(__lowerCAmelCase )
print(F'''Saving image processor to {pytorch_dump_folder_path}''' )
image_processor.save_pretrained(__lowerCAmelCase )
if push_to_hub:
model.push_to_hub("""ybelkada/dpt-hybrid-midas""" )
image_processor.push_to_hub("""ybelkada/dpt-hybrid-midas""" )
if __name__ == "__main__":
UpperCamelCase : Any = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--checkpoint_url',
default='https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt',
type=str,
help='URL of the original DPT checkpoint you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
type=str,
required=False,
help='Path to the output PyTorch model directory.',
)
parser.add_argument(
'--push_to_hub',
action='store_true',
)
parser.add_argument(
'--model_name',
default='dpt-large',
type=str,
help='Name of the model, in case you\'re pushing to the hub.',
)
parser.add_argument(
'--show_prediction',
action='store_true',
)
UpperCamelCase : List[str] = parser.parse_args()
convert_dpt_checkpoint(
args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name, args.show_prediction
)
| 50
|
'''simple docstring'''
from typing import Optional
import pyspark
from .. import Features, NamedSplit
from ..download import DownloadMode
from ..packaged_modules.spark.spark import Spark
from .abc import AbstractDatasetReader
class __lowercase (__lowerCamelCase ):
def __init__( self : Optional[int] , UpperCAmelCase_ : pyspark.sql.DataFrame , UpperCAmelCase_ : Optional[NamedSplit] = None , UpperCAmelCase_ : Optional[Features] = None , UpperCAmelCase_ : bool = True , UpperCAmelCase_ : str = None , UpperCAmelCase_ : bool = False , UpperCAmelCase_ : str = None , UpperCAmelCase_ : bool = True , UpperCAmelCase_ : str = "arrow" , **UpperCAmelCase_ : str , ):
super().__init__(
split=UpperCAmelCase_ , features=UpperCAmelCase_ , cache_dir=UpperCAmelCase_ , keep_in_memory=UpperCAmelCase_ , streaming=UpperCAmelCase_ , **UpperCAmelCase_ , )
UpperCamelCase__ : Union[str, Any] = load_from_cache_file
UpperCamelCase__ : int = file_format
UpperCamelCase__ : Any = Spark(
df=UpperCAmelCase_ , features=UpperCAmelCase_ , cache_dir=UpperCAmelCase_ , working_dir=UpperCAmelCase_ , **UpperCAmelCase_ , )
def __UpperCamelCase ( self : Optional[int]):
if self.streaming:
return self.builder.as_streaming_dataset(split=self.split)
UpperCamelCase__ : Tuple = None if self._load_from_cache_file else DownloadMode.FORCE_REDOWNLOAD
self.builder.download_and_prepare(
download_mode=UpperCAmelCase_ , file_format=self._file_format , )
return self.builder.as_dataset(split=self.split)
| 596
| 0
|
'''simple docstring'''
from typing import Dict, Iterable, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_pytesseract_available, is_vision_available, logging, requires_backends
if is_vision_available():
import PIL
# soft dependency
if is_pytesseract_available():
import pytesseract
_SCREAMING_SNAKE_CASE : Dict = logging.get_logger(__name__)
def UpperCAmelCase_ ( _A , _A , _A ):
'''simple docstring'''
return [
int(10_00 * (box[0] / width) ),
int(10_00 * (box[1] / height) ),
int(10_00 * (box[2] / width) ),
int(10_00 * (box[3] / height) ),
]
def UpperCAmelCase_ ( _A , _A , _A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = to_pil_image(lowercase_ )
SCREAMING_SNAKE_CASE__ = pil_image.size
SCREAMING_SNAKE_CASE__ = pytesseract.image_to_data(lowercase_ , lang=lowercase_ , output_type='''dict''' , config=lowercase_ )
SCREAMING_SNAKE_CASE__ = data["text"], data["left"], data["top"], data["width"], data["height"]
# filter empty words and corresponding coordinates
SCREAMING_SNAKE_CASE__ = [idx for idx, word in enumerate(lowercase_ ) if not word.strip()]
SCREAMING_SNAKE_CASE__ = [word for idx, word in enumerate(lowercase_ ) if idx not in irrelevant_indices]
SCREAMING_SNAKE_CASE__ = [coord for idx, coord in enumerate(lowercase_ ) if idx not in irrelevant_indices]
SCREAMING_SNAKE_CASE__ = [coord for idx, coord in enumerate(lowercase_ ) if idx not in irrelevant_indices]
SCREAMING_SNAKE_CASE__ = [coord for idx, coord in enumerate(lowercase_ ) if idx not in irrelevant_indices]
SCREAMING_SNAKE_CASE__ = [coord for idx, coord in enumerate(lowercase_ ) if idx not in irrelevant_indices]
# turn coordinates into (left, top, left+width, top+height) format
SCREAMING_SNAKE_CASE__ = []
for x, y, w, h in zip(lowercase_ , lowercase_ , lowercase_ , lowercase_ ):
SCREAMING_SNAKE_CASE__ = [x, y, x + w, y + h]
actual_boxes.append(lowercase_ )
# finally, normalize the bounding boxes
SCREAMING_SNAKE_CASE__ = []
for box in actual_boxes:
normalized_boxes.append(normalize_box(lowercase_ , lowercase_ , lowercase_ ) )
assert len(lowercase_ ) == len(lowercase_ ), "Not as many words as there are bounding boxes"
return words, normalized_boxes
class UpperCAmelCase__ ( _UpperCamelCase ):
"""simple docstring"""
a = ["pixel_values"]
def __init__( self : Tuple , __lowerCamelCase : bool = True , __lowerCamelCase : Dict[str, int] = None , __lowerCamelCase : PILImageResampling = PILImageResampling.BILINEAR , __lowerCamelCase : bool = True , __lowerCamelCase : float = 1 / 255 , __lowerCamelCase : bool = True , __lowerCamelCase : Union[float, Iterable[float]] = None , __lowerCamelCase : Union[float, Iterable[float]] = None , __lowerCamelCase : bool = True , __lowerCamelCase : Optional[str] = None , __lowerCamelCase : Optional[str] = "" , **__lowerCamelCase : Optional[Any] , ) -> None:
super().__init__(**__a )
SCREAMING_SNAKE_CASE__ = size if size is not None else {"height": 224, "width": 224}
SCREAMING_SNAKE_CASE__ = get_size_dict(__a )
SCREAMING_SNAKE_CASE__ = do_resize
SCREAMING_SNAKE_CASE__ = size
SCREAMING_SNAKE_CASE__ = resample
SCREAMING_SNAKE_CASE__ = do_rescale
SCREAMING_SNAKE_CASE__ = rescale_value
SCREAMING_SNAKE_CASE__ = do_normalize
SCREAMING_SNAKE_CASE__ = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
SCREAMING_SNAKE_CASE__ = image_std if image_std is not None else IMAGENET_STANDARD_STD
SCREAMING_SNAKE_CASE__ = apply_ocr
SCREAMING_SNAKE_CASE__ = ocr_lang
SCREAMING_SNAKE_CASE__ = tesseract_config
def lowercase_ ( self : Dict , __lowerCamelCase : np.ndarray , __lowerCamelCase : Dict[str, int] , __lowerCamelCase : PILImageResampling = PILImageResampling.BILINEAR , __lowerCamelCase : Optional[Union[str, ChannelDimension]] = None , **__lowerCamelCase : Optional[int] , ) -> np.ndarray:
SCREAMING_SNAKE_CASE__ = get_size_dict(__a )
if "height" not in size or "width" not in size:
raise ValueError(f'''The size dictionary must contain the keys \'height\' and \'width\'. Got {size.keys()}''' )
SCREAMING_SNAKE_CASE__ = (size["height"], size["width"])
return resize(__a , size=__a , resample=__a , data_format=__a , **__a )
def lowercase_ ( self : Tuple , __lowerCamelCase : np.ndarray , __lowerCamelCase : Union[int, float] , __lowerCamelCase : Optional[Union[str, ChannelDimension]] = None , **__lowerCamelCase : Optional[Any] , ) -> np.ndarray:
return rescale(__a , scale=__a , data_format=__a , **__a )
def lowercase_ ( self : str , __lowerCamelCase : np.ndarray , __lowerCamelCase : Union[float, Iterable[float]] , __lowerCamelCase : Union[float, Iterable[float]] , __lowerCamelCase : Optional[Union[str, ChannelDimension]] = None , **__lowerCamelCase : Optional[Any] , ) -> np.ndarray:
return normalize(__a , mean=__a , std=__a , data_format=__a , **__a )
def lowercase_ ( self : str , __lowerCamelCase : ImageInput , __lowerCamelCase : bool = None , __lowerCamelCase : Dict[str, int] = None , __lowerCamelCase : str=None , __lowerCamelCase : bool = None , __lowerCamelCase : float = None , __lowerCamelCase : bool = None , __lowerCamelCase : Union[float, Iterable[float]] = None , __lowerCamelCase : Union[float, Iterable[float]] = None , __lowerCamelCase : bool = None , __lowerCamelCase : Optional[str] = None , __lowerCamelCase : Optional[str] = None , __lowerCamelCase : Optional[Union[str, TensorType]] = None , __lowerCamelCase : ChannelDimension = ChannelDimension.FIRST , **__lowerCamelCase : Optional[int] , ) -> PIL.Image.Image:
SCREAMING_SNAKE_CASE__ = do_resize if do_resize is not None else self.do_resize
SCREAMING_SNAKE_CASE__ = size if size is not None else self.size
SCREAMING_SNAKE_CASE__ = get_size_dict(__a )
SCREAMING_SNAKE_CASE__ = resample if resample is not None else self.resample
SCREAMING_SNAKE_CASE__ = do_rescale if do_rescale is not None else self.do_rescale
SCREAMING_SNAKE_CASE__ = rescale_factor if rescale_factor is not None else self.rescale_factor
SCREAMING_SNAKE_CASE__ = do_normalize if do_normalize is not None else self.do_normalize
SCREAMING_SNAKE_CASE__ = image_mean if image_mean is not None else self.image_mean
SCREAMING_SNAKE_CASE__ = image_std if image_std is not None else self.image_std
SCREAMING_SNAKE_CASE__ = apply_ocr if apply_ocr is not None else self.apply_ocr
SCREAMING_SNAKE_CASE__ = ocr_lang if ocr_lang is not None else self.ocr_lang
SCREAMING_SNAKE_CASE__ = tesseract_config if tesseract_config is not None else self.tesseract_config
SCREAMING_SNAKE_CASE__ = make_list_of_images(__a )
if not valid_images(__a ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
if do_resize and size is None:
raise ValueError('''Size must be specified if do_resize is True.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('''If do_normalize is True, image_mean and image_std must be specified.''' )
# All transformations expect numpy arrays.
SCREAMING_SNAKE_CASE__ = [to_numpy_array(__a ) for image in images]
# Tesseract OCR to get words + normalized bounding boxes
if apply_ocr:
requires_backends(self , '''pytesseract''' )
SCREAMING_SNAKE_CASE__ = []
SCREAMING_SNAKE_CASE__ = []
for image in images:
SCREAMING_SNAKE_CASE__ = apply_tesseract(__a , __a , __a )
words_batch.append(__a )
boxes_batch.append(__a )
if do_resize:
SCREAMING_SNAKE_CASE__ = [self.resize(image=__a , size=__a , resample=__a ) for image in images]
if do_rescale:
SCREAMING_SNAKE_CASE__ = [self.rescale(image=__a , scale=__a ) for image in images]
if do_normalize:
SCREAMING_SNAKE_CASE__ = [self.normalize(image=__a , mean=__a , std=__a ) for image in images]
SCREAMING_SNAKE_CASE__ = [to_channel_dimension_format(__a , __a ) for image in images]
SCREAMING_SNAKE_CASE__ = BatchFeature(data={'''pixel_values''': images} , tensor_type=__a )
if apply_ocr:
SCREAMING_SNAKE_CASE__ = words_batch
SCREAMING_SNAKE_CASE__ = boxes_batch
return data
| 707
|
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, XLMRobertaTokenizer
from diffusers import AltDiffusionPipeline, AutoencoderKL, DDIMScheduler, PNDMScheduler, UNetaDConditionModel
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
RobertaSeriesConfig,
RobertaSeriesModelWithTransformation,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class UpperCAmelCase__ ( A__ , A__ , A__ , unittest.TestCase ):
"""simple docstring"""
a = AltDiffusionPipeline
a = TEXT_TO_IMAGE_PARAMS
a = TEXT_TO_IMAGE_BATCH_PARAMS
a = TEXT_TO_IMAGE_IMAGE_PARAMS
a = TEXT_TO_IMAGE_IMAGE_PARAMS
def lowercase_ ( self : Tuple ) -> Tuple:
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__ = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , )
SCREAMING_SNAKE_CASE__ = DDIMScheduler(
beta_start=0.00085 , beta_end=0.012 , beta_schedule='''scaled_linear''' , clip_sample=__lowerCamelCase , set_alpha_to_one=__lowerCamelCase , )
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__ = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , )
# TODO: address the non-deterministic text encoder (fails for save-load tests)
# torch.manual_seed(0)
# text_encoder_config = RobertaSeriesConfig(
# hidden_size=32,
# project_dim=32,
# intermediate_size=37,
# layer_norm_eps=1e-05,
# num_attention_heads=4,
# num_hidden_layers=5,
# vocab_size=5002,
# )
# text_encoder = RobertaSeriesModelWithTransformation(text_encoder_config)
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__ = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , projection_dim=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=5002 , )
SCREAMING_SNAKE_CASE__ = CLIPTextModel(__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = XLMRobertaTokenizer.from_pretrained('''hf-internal-testing/tiny-xlm-roberta''' )
SCREAMING_SNAKE_CASE__ = 77
SCREAMING_SNAKE_CASE__ = {
'''unet''': unet,
'''scheduler''': scheduler,
'''vae''': vae,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''safety_checker''': None,
'''feature_extractor''': None,
}
return components
def lowercase_ ( self : Any , __lowerCamelCase : Tuple , __lowerCamelCase : Optional[int]=0 ) -> Union[str, Any]:
if str(__lowerCamelCase ).startswith('''mps''' ):
SCREAMING_SNAKE_CASE__ = torch.manual_seed(__lowerCamelCase )
else:
SCREAMING_SNAKE_CASE__ = torch.Generator(device=__lowerCamelCase ).manual_seed(__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''generator''': generator,
'''num_inference_steps''': 2,
'''guidance_scale''': 6.0,
'''output_type''': '''numpy''',
}
return inputs
def lowercase_ ( self : List[Any] ) -> str:
super().test_attention_slicing_forward_pass(expected_max_diff=3e-3 )
def lowercase_ ( self : List[Any] ) -> int:
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
def lowercase_ ( self : Dict ) -> Optional[int]:
SCREAMING_SNAKE_CASE__ = '''cpu''' # ensure determinism for the device-dependent torch.Generator
SCREAMING_SNAKE_CASE__ = self.get_dummy_components()
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__ = RobertaSeriesConfig(
hidden_size=32 , project_dim=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=5002 , )
# TODO: remove after fixing the non-deterministic text encoder
SCREAMING_SNAKE_CASE__ = RobertaSeriesModelWithTransformation(__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = text_encoder
SCREAMING_SNAKE_CASE__ = AltDiffusionPipeline(**__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = alt_pipe.to(__lowerCamelCase )
alt_pipe.set_progress_bar_config(disable=__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = self.get_dummy_inputs(__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = '''A photo of an astronaut'''
SCREAMING_SNAKE_CASE__ = alt_pipe(**__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = output.images
SCREAMING_SNAKE_CASE__ = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
SCREAMING_SNAKE_CASE__ = np.array(
[0.5748162, 0.60447145, 0.48821217, 0.50100636, 0.5431185, 0.45763683, 0.49657696, 0.48132733, 0.47573093] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def lowercase_ ( self : List[Any] ) -> Optional[Any]:
SCREAMING_SNAKE_CASE__ = '''cpu''' # ensure determinism for the device-dependent torch.Generator
SCREAMING_SNAKE_CASE__ = self.get_dummy_components()
SCREAMING_SNAKE_CASE__ = PNDMScheduler(skip_prk_steps=__lowerCamelCase )
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__ = RobertaSeriesConfig(
hidden_size=32 , project_dim=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=5002 , )
# TODO: remove after fixing the non-deterministic text encoder
SCREAMING_SNAKE_CASE__ = RobertaSeriesModelWithTransformation(__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = text_encoder
SCREAMING_SNAKE_CASE__ = AltDiffusionPipeline(**__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = alt_pipe.to(__lowerCamelCase )
alt_pipe.set_progress_bar_config(disable=__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = self.get_dummy_inputs(__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = alt_pipe(**__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = output.images
SCREAMING_SNAKE_CASE__ = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
SCREAMING_SNAKE_CASE__ = np.array(
[0.51605093, 0.5707241, 0.47365507, 0.50578886, 0.5633877, 0.4642503, 0.5182081, 0.48763484, 0.49084237] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
@slow
@require_torch_gpu
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
def lowercase_ ( self : Tuple ) -> str:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowercase_ ( self : str ) -> Any:
# make sure here that pndm scheduler skips prk
SCREAMING_SNAKE_CASE__ = AltDiffusionPipeline.from_pretrained('''BAAI/AltDiffusion''' , safety_checker=__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = alt_pipe.to(__lowerCamelCase )
alt_pipe.set_progress_bar_config(disable=__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = '''A painting of a squirrel eating a burger'''
SCREAMING_SNAKE_CASE__ = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__ = alt_pipe([prompt] , generator=__lowerCamelCase , guidance_scale=6.0 , num_inference_steps=20 , output_type='''np''' )
SCREAMING_SNAKE_CASE__ = output.images
SCREAMING_SNAKE_CASE__ = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
SCREAMING_SNAKE_CASE__ = np.array([0.1010, 0.0800, 0.0794, 0.0885, 0.0843, 0.0762, 0.0769, 0.0729, 0.0586] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def lowercase_ ( self : Optional[int] ) -> List[str]:
SCREAMING_SNAKE_CASE__ = DDIMScheduler.from_pretrained('''BAAI/AltDiffusion''' , subfolder='''scheduler''' )
SCREAMING_SNAKE_CASE__ = AltDiffusionPipeline.from_pretrained('''BAAI/AltDiffusion''' , scheduler=__lowerCamelCase , safety_checker=__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = alt_pipe.to(__lowerCamelCase )
alt_pipe.set_progress_bar_config(disable=__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = '''A painting of a squirrel eating a burger'''
SCREAMING_SNAKE_CASE__ = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__ = alt_pipe([prompt] , generator=__lowerCamelCase , num_inference_steps=2 , output_type='''numpy''' )
SCREAMING_SNAKE_CASE__ = output.images
SCREAMING_SNAKE_CASE__ = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
SCREAMING_SNAKE_CASE__ = np.array([0.4019, 0.4052, 0.3810, 0.4119, 0.3916, 0.3982, 0.4651, 0.4195, 0.5323] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
| 472
| 0
|
"""simple docstring"""
a_ = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
def __UpperCAmelCase ( ):
__lowercase : Any = input('''Enter message: ''' )
__lowercase : Union[str, Any] = input('''Enter key [alphanumeric]: ''' )
__lowercase : List[Any] = input('''Encrypt/Decrypt [e/d]: ''' )
if mode.lower().startswith('''e''' ):
__lowercase : Union[str, Any] = '''encrypt'''
__lowercase : Any = encrypt_message(__UpperCamelCase , __UpperCamelCase )
elif mode.lower().startswith('''d''' ):
__lowercase : int = '''decrypt'''
__lowercase : Dict = decrypt_message(__UpperCamelCase , __UpperCamelCase )
print(f"""\n{mode.title()}ed message:""" )
print(__UpperCamelCase )
def __UpperCAmelCase ( __UpperCamelCase , __UpperCamelCase ):
return translate_message(__UpperCamelCase , __UpperCamelCase , '''encrypt''' )
def __UpperCAmelCase ( __UpperCamelCase , __UpperCamelCase ):
return translate_message(__UpperCamelCase , __UpperCamelCase , '''decrypt''' )
def __UpperCAmelCase ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ):
__lowercase : Optional[Any] = []
__lowercase : Union[str, Any] = 0
__lowercase : Optional[int] = key.upper()
for symbol in message:
__lowercase : Optional[Any] = LETTERS.find(symbol.upper() )
if num != -1:
if mode == "encrypt":
num += LETTERS.find(key[key_index] )
elif mode == "decrypt":
num -= LETTERS.find(key[key_index] )
num %= len(__UpperCamelCase )
if symbol.isupper():
translated.append(LETTERS[num] )
elif symbol.islower():
translated.append(LETTERS[num].lower() )
key_index += 1
if key_index == len(__UpperCamelCase ):
__lowercase : List[Any] = 0
else:
translated.append(__UpperCamelCase )
return "".join(__UpperCamelCase )
if __name__ == "__main__":
main()
| 76
|
"""simple docstring"""
import gc
import threading
import time
import psutil
import torch
class UpperCAmelCase_ :
def __init__( self ) -> str:
__lowercase : List[Any] = psutil.Process()
__lowercase : Any = False
def _lowerCamelCase ( self ) -> Union[str, Any]:
__lowercase : Optional[Any] = -1
while True:
__lowercase : List[str] = max(self.process.memory_info().rss , self.cpu_memory_peak )
# can't sleep or will not catch the peak right (this comment is here on purpose)
if not self.peak_monitoring:
break
def _lowerCamelCase ( self ) -> Optional[Any]:
__lowercase : List[Any] = True
__lowercase : List[Any] = threading.Thread(target=self.peak_monitor )
__lowercase : Optional[int] = True
self.thread.start()
def _lowerCamelCase ( self ) -> Optional[Any]:
__lowercase : Union[str, Any] = False
self.thread.join()
return self.cpu_memory_peak
a_ = PeakCPUMemory()
def __UpperCAmelCase ( ):
# Time
__lowercase : Union[str, Any] = {'''time''': time.time()}
gc.collect()
torch.cuda.empty_cache()
# CPU mem
__lowercase : List[Any] = psutil.Process().memory_info().rss
cpu_peak_tracker.start()
# GPU mem
for i in range(torch.cuda.device_count() ):
__lowercase : List[str] = torch.cuda.memory_allocated(__UpperCamelCase )
torch.cuda.reset_peak_memory_stats()
return measures
def __UpperCAmelCase ( __UpperCamelCase ):
# Time
__lowercase : List[Any] = {'''time''': time.time() - start_measures['''time''']}
gc.collect()
torch.cuda.empty_cache()
# CPU mem
__lowercase : Union[str, Any] = (psutil.Process().memory_info().rss - start_measures['''cpu''']) / 2**20
__lowercase : Dict = (cpu_peak_tracker.stop() - start_measures['''cpu''']) / 2**20
# GPU mem
for i in range(torch.cuda.device_count() ):
__lowercase : str = (torch.cuda.memory_allocated(__UpperCamelCase ) - start_measures[str(__UpperCamelCase )]) / 2**20
__lowercase : Optional[int] = (torch.cuda.max_memory_allocated(__UpperCamelCase ) - start_measures[str(__UpperCamelCase )]) / 2**20
return measures
def __UpperCAmelCase ( __UpperCamelCase , __UpperCamelCase ):
print(f"""{description}:""" )
print(f"""- Time: {measures["time"]:.2f}s""" )
for i in range(torch.cuda.device_count() ):
print(f"""- GPU {i} allocated: {measures[str(__UpperCamelCase )]:.2f}MiB""" )
__lowercase : Dict = measures[f"""{i}-peak"""]
print(f"""- GPU {i} peak: {peak:.2f}MiB""" )
print(f"""- CPU RAM allocated: {measures["cpu"]:.2f}MiB""" )
print(f"""- CPU RAM peak: {measures["cpu-peak"]:.2f}MiB""" )
| 76
| 1
|
def lowerCamelCase_ ( _a : float , _a : float , _a : int ):
'''simple docstring'''
if principal <= 0:
raise Exception("""Principal borrowed must be > 0""" )
if rate_per_annum < 0:
raise Exception("""Rate of interest must be >= 0""" )
if years_to_repay <= 0 or not isinstance(_a , _a ):
raise Exception("""Years to repay must be an integer > 0""" )
# Yearly rate is divided by 12 to get monthly rate
UpperCAmelCase_ : Optional[int] = rate_per_annum / 12
# Years to repay is multiplied by 12 to get number of payments as payment is monthly
UpperCAmelCase_ : List[Any] = years_to_repay * 12
return (
principal
* rate_per_month
* (1 + rate_per_month) ** number_of_payments
/ ((1 + rate_per_month) ** number_of_payments - 1)
)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 702
|
from typing import Callable, List, Optional, Tuple, Union
import torch
from transformers import CLIPTextModel, CLIPTokenizer
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin, TransformeraDModel, VQModel
from ...schedulers import VQDiffusionScheduler
from ...utils import logging
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
UpperCamelCase_ = logging.get_logger(__name__) # pylint: disable=invalid-name
class _snake_case ( __snake_case , __snake_case ):
'''simple docstring'''
@register_to_config
def __init__( self: str ,lowerCamelCase_: bool ,lowerCamelCase_: Optional[int] = None ,lowerCamelCase_: Optional[int] = None ) -> int:
super().__init__()
UpperCAmelCase_ : List[str] = learnable
if self.learnable:
assert hidden_size is not None, "learnable=True requires `hidden_size` to be set"
assert length is not None, "learnable=True requires `length` to be set"
UpperCAmelCase_ : Optional[int] = torch.zeros(lowerCamelCase_ ,lowerCamelCase_ )
else:
UpperCAmelCase_ : Optional[int] = None
UpperCAmelCase_ : Tuple = torch.nn.Parameter(lowerCamelCase_ )
class _snake_case ( __snake_case ):
'''simple docstring'''
A__ : VQModel
A__ : CLIPTextModel
A__ : CLIPTokenizer
A__ : TransformeraDModel
A__ : LearnedClassifierFreeSamplingEmbeddings
A__ : VQDiffusionScheduler
def __init__( self: str ,lowerCamelCase_: VQModel ,lowerCamelCase_: CLIPTextModel ,lowerCamelCase_: CLIPTokenizer ,lowerCamelCase_: TransformeraDModel ,lowerCamelCase_: VQDiffusionScheduler ,lowerCamelCase_: LearnedClassifierFreeSamplingEmbeddings ,) -> int:
super().__init__()
self.register_modules(
vqvae=lowerCamelCase_ ,transformer=lowerCamelCase_ ,text_encoder=lowerCamelCase_ ,tokenizer=lowerCamelCase_ ,scheduler=lowerCamelCase_ ,learned_classifier_free_sampling_embeddings=lowerCamelCase_ ,)
def A__ ( self: List[Any] ,lowerCamelCase_: Optional[int] ,lowerCamelCase_: Union[str, Any] ,lowerCamelCase_: List[str] ) -> Tuple:
UpperCAmelCase_ : Optional[int] = len(lowerCamelCase_ ) if isinstance(lowerCamelCase_ ,lowerCamelCase_ ) else 1
# get prompt text embeddings
UpperCAmelCase_ : List[Any] = self.tokenizer(
lowerCamelCase_ ,padding="""max_length""" ,max_length=self.tokenizer.model_max_length ,return_tensors="""pt""" ,)
UpperCAmelCase_ : Tuple = text_inputs.input_ids
if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
UpperCAmelCase_ : int = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] )
logger.warning(
"""The following part of your input was truncated because CLIP can only handle sequences up to"""
F''' {self.tokenizer.model_max_length} tokens: {removed_text}''' )
UpperCAmelCase_ : Union[str, Any] = text_input_ids[:, : self.tokenizer.model_max_length]
UpperCAmelCase_ : Tuple = self.text_encoder(text_input_ids.to(self.device ) )[0]
# NOTE: This additional step of normalizing the text embeddings is from VQ-Diffusion.
# While CLIP does normalize the pooled output of the text transformer when combining
# the image and text embeddings, CLIP does not directly normalize the last hidden state.
#
# CLIP normalizing the pooled output.
# https://github.com/huggingface/transformers/blob/d92e22d1f28324f513f3080e5c47c071a3916721/src/transformers/models/clip/modeling_clip.py#L1052-L1053
UpperCAmelCase_ : Dict = prompt_embeds / prompt_embeds.norm(dim=-1 ,keepdim=lowerCamelCase_ )
# duplicate text embeddings for each generation per prompt
UpperCAmelCase_ : Tuple = prompt_embeds.repeat_interleave(lowerCamelCase_ ,dim=0 )
if do_classifier_free_guidance:
if self.learned_classifier_free_sampling_embeddings.learnable:
UpperCAmelCase_ : int = self.learned_classifier_free_sampling_embeddings.embeddings
UpperCAmelCase_ : Optional[int] = negative_prompt_embeds.unsqueeze(0 ).repeat(lowerCamelCase_ ,1 ,1 )
else:
UpperCAmelCase_ : Optional[int] = [""""""] * batch_size
UpperCAmelCase_ : Tuple = text_input_ids.shape[-1]
UpperCAmelCase_ : Union[str, Any] = self.tokenizer(
lowerCamelCase_ ,padding="""max_length""" ,max_length=lowerCamelCase_ ,truncation=lowerCamelCase_ ,return_tensors="""pt""" ,)
UpperCAmelCase_ : Optional[int] = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# See comment for normalizing text embeddings
UpperCAmelCase_ : Tuple = negative_prompt_embeds / negative_prompt_embeds.norm(dim=-1 ,keepdim=lowerCamelCase_ )
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
UpperCAmelCase_ : List[str] = negative_prompt_embeds.shape[1]
UpperCAmelCase_ : str = negative_prompt_embeds.repeat(1 ,lowerCamelCase_ ,1 )
UpperCAmelCase_ : int = negative_prompt_embeds.view(batch_size * num_images_per_prompt ,lowerCamelCase_ ,-1 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
UpperCAmelCase_ : Union[str, Any] = torch.cat([negative_prompt_embeds, prompt_embeds] )
return prompt_embeds
@torch.no_grad()
def __call__( self: Tuple ,lowerCamelCase_: Union[str, List[str]] ,lowerCamelCase_: int = 100 ,lowerCamelCase_: float = 5.0 ,lowerCamelCase_: float = 1.0 ,lowerCamelCase_: int = 1 ,lowerCamelCase_: Optional[Union[torch.Generator, List[torch.Generator]]] = None ,lowerCamelCase_: Optional[torch.FloatTensor] = None ,lowerCamelCase_: Optional[str] = "pil" ,lowerCamelCase_: bool = True ,lowerCamelCase_: Optional[Callable[[int, int, torch.FloatTensor], None]] = None ,lowerCamelCase_: int = 1 ,) -> Union[ImagePipelineOutput, Tuple]:
if isinstance(lowerCamelCase_ ,lowerCamelCase_ ):
UpperCAmelCase_ : List[str] = 1
elif isinstance(lowerCamelCase_ ,lowerCamelCase_ ):
UpperCAmelCase_ : Any = len(lowerCamelCase_ )
else:
raise ValueError(F'''`prompt` has to be of type `str` or `list` but is {type(lowerCamelCase_ )}''' )
UpperCAmelCase_ : Any = batch_size * num_images_per_prompt
UpperCAmelCase_ : int = guidance_scale > 1.0
UpperCAmelCase_ : Union[str, Any] = self._encode_prompt(lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ )
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(lowerCamelCase_ ,lowerCamelCase_ ) or callback_steps <= 0)
):
raise ValueError(
F'''`callback_steps` has to be a positive integer but is {callback_steps} of type'''
F''' {type(lowerCamelCase_ )}.''' )
# get the initial completely masked latents unless the user supplied it
UpperCAmelCase_ : Dict = (batch_size, self.transformer.num_latent_pixels)
if latents is None:
UpperCAmelCase_ : Optional[int] = self.transformer.num_vector_embeds - 1
UpperCAmelCase_ : Optional[int] = torch.full(lowerCamelCase_ ,lowerCamelCase_ ).to(self.device )
else:
if latents.shape != latents_shape:
raise ValueError(F'''Unexpected latents shape, got {latents.shape}, expected {latents_shape}''' )
if (latents < 0).any() or (latents >= self.transformer.num_vector_embeds).any():
raise ValueError(
"""Unexpected latents value(s). All latents be valid embedding indices i.e. in the range 0,"""
F''' {self.transformer.num_vector_embeds - 1} (inclusive).''' )
UpperCAmelCase_ : Optional[Any] = latents.to(self.device )
# set timesteps
self.scheduler.set_timesteps(lowerCamelCase_ ,device=self.device )
UpperCAmelCase_ : List[str] = self.scheduler.timesteps.to(self.device )
UpperCAmelCase_ : Any = latents
for i, t in enumerate(self.progress_bar(lowerCamelCase_ ) ):
# expand the sample if we are doing classifier free guidance
UpperCAmelCase_ : Tuple = torch.cat([sample] * 2 ) if do_classifier_free_guidance else sample
# predict the un-noised image
# model_output == `log_p_x_0`
UpperCAmelCase_ : Tuple = self.transformer(lowerCamelCase_ ,encoder_hidden_states=lowerCamelCase_ ,timestep=lowerCamelCase_ ).sample
if do_classifier_free_guidance:
UpperCAmelCase_ , UpperCAmelCase_ : Optional[int] = model_output.chunk(2 )
UpperCAmelCase_ : str = model_output_uncond + guidance_scale * (model_output_text - model_output_uncond)
model_output -= torch.logsumexp(lowerCamelCase_ ,dim=1 ,keepdim=lowerCamelCase_ )
UpperCAmelCase_ : str = self.truncate(lowerCamelCase_ ,lowerCamelCase_ )
# remove `log(0)`'s (`-inf`s)
UpperCAmelCase_ : Optional[Any] = model_output.clamp(-70 )
# compute the previous noisy sample x_t -> x_t-1
UpperCAmelCase_ : Union[str, Any] = self.scheduler.step(lowerCamelCase_ ,timestep=lowerCamelCase_ ,sample=lowerCamelCase_ ,generator=lowerCamelCase_ ).prev_sample
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ )
UpperCAmelCase_ : List[str] = self.vqvae.config.vq_embed_dim
UpperCAmelCase_ : Union[str, Any] = (batch_size, self.transformer.height, self.transformer.width, embedding_channels)
UpperCAmelCase_ : List[Any] = self.vqvae.quantize.get_codebook_entry(lowerCamelCase_ ,shape=lowerCamelCase_ )
UpperCAmelCase_ : Optional[Any] = self.vqvae.decode(lowerCamelCase_ ,force_not_quantize=lowerCamelCase_ ).sample
UpperCAmelCase_ : Optional[int] = (image / 2 + 0.5).clamp(0 ,1 )
UpperCAmelCase_ : Union[str, Any] = image.cpu().permute(0 ,2 ,3 ,1 ).numpy()
if output_type == "pil":
UpperCAmelCase_ : str = self.numpy_to_pil(lowerCamelCase_ )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=lowerCamelCase_ )
def A__ ( self: Union[str, Any] ,lowerCamelCase_: torch.FloatTensor ,lowerCamelCase_: float ) -> torch.FloatTensor:
UpperCAmelCase_ , UpperCAmelCase_ : Optional[Any] = torch.sort(lowerCamelCase_ ,1 ,descending=lowerCamelCase_ )
UpperCAmelCase_ : List[Any] = torch.exp(lowerCamelCase_ )
UpperCAmelCase_ : Optional[int] = sorted_p_x_0.cumsum(dim=1 ) < truncation_rate
# Ensure that at least the largest probability is not zeroed out
UpperCAmelCase_ : Optional[Any] = torch.full_like(keep_mask[:, 0:1, :] ,lowerCamelCase_ )
UpperCAmelCase_ : Any = torch.cat((all_true, keep_mask) ,dim=1 )
UpperCAmelCase_ : Union[str, Any] = keep_mask[:, :-1, :]
UpperCAmelCase_ : Dict = keep_mask.gather(1 ,indices.argsort(1 ) )
UpperCAmelCase_ : Any = log_p_x_0.clone()
UpperCAmelCase_ : int = -torch.inf # -inf = log(0)
return rv
| 322
| 0
|
import gc
import random
import unittest
import torch
from diffusers import (
IFImgaImgPipeline,
IFImgaImgSuperResolutionPipeline,
IFInpaintingPipeline,
IFInpaintingSuperResolutionPipeline,
IFPipeline,
IFSuperResolutionPipeline,
)
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import floats_tensor, load_numpy, require_torch_gpu, skip_mps, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
from . import IFPipelineTesterMixin
@skip_mps
class snake_case ( _snake_case , _snake_case , unittest.TestCase ):
'''simple docstring'''
UpperCamelCase__ : List[Any] = IFPipeline
UpperCamelCase__ : List[Any] = TEXT_TO_IMAGE_PARAMS - {"width", "height", "latents"}
UpperCamelCase__ : str = TEXT_TO_IMAGE_BATCH_PARAMS
UpperCamelCase__ : List[str] = PipelineTesterMixin.required_optional_params - {"latents"}
def UpperCAmelCase ( self : str ) ->Optional[int]:
'''simple docstring'''
return self._get_dummy_components()
def UpperCAmelCase ( self : str , lowerCamelCase_ : Tuple , lowerCamelCase_ : Union[str, Any]=0 ) ->Dict:
'''simple docstring'''
if str(lowerCamelCase_ ).startswith("""mps""" ):
UpperCAmelCase__ = torch.manual_seed(lowerCamelCase_ )
else:
UpperCAmelCase__ = torch.Generator(device=lowerCamelCase_ ).manual_seed(lowerCamelCase_ )
UpperCAmelCase__ = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""generator""": generator,
"""num_inference_steps""": 2,
"""output_type""": """numpy""",
}
return inputs
def UpperCAmelCase ( self : Optional[Any] ) ->int:
'''simple docstring'''
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != """cuda""" , reason="""float16 requires CUDA""" )
def UpperCAmelCase ( self : Optional[int] ) ->Dict:
'''simple docstring'''
super().test_save_load_floataa(expected_max_diff=1E-1 )
def UpperCAmelCase ( self : Tuple ) ->Any:
'''simple docstring'''
self._test_attention_slicing_forward_pass(expected_max_diff=1E-2 )
def UpperCAmelCase ( self : int ) ->int:
'''simple docstring'''
self._test_save_load_local()
def UpperCAmelCase ( self : List[Any] ) ->Optional[int]:
'''simple docstring'''
self._test_inference_batch_single_identical(
expected_max_diff=1E-2 , )
@unittest.skipIf(
torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , )
def UpperCAmelCase ( self : List[str] ) ->Optional[int]:
'''simple docstring'''
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 )
@slow
@require_torch_gpu
class snake_case ( unittest.TestCase ):
'''simple docstring'''
def UpperCAmelCase ( self : Dict ) ->str:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCAmelCase ( self : Optional[Any] ) ->Optional[Any]:
'''simple docstring'''
UpperCAmelCase__ = IFPipeline.from_pretrained("""DeepFloyd/IF-I-XL-v1.0""" , variant="""fp16""" , torch_dtype=torch.floataa )
UpperCAmelCase__ = IFSuperResolutionPipeline.from_pretrained(
"""DeepFloyd/IF-II-L-v1.0""" , variant="""fp16""" , torch_dtype=torch.floataa , text_encoder=lowerCamelCase_ , tokenizer=lowerCamelCase_ )
# pre compute text embeddings and remove T5 to save memory
pipe_a.text_encoder.to("""cuda""" )
UpperCAmelCase__ , UpperCAmelCase__ = pipe_a.encode_prompt("""anime turtle""" , device="""cuda""" )
del pipe_a.tokenizer
del pipe_a.text_encoder
gc.collect()
UpperCAmelCase__ = None
UpperCAmelCase__ = None
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
self._test_if(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
pipe_a.remove_all_hooks()
pipe_a.remove_all_hooks()
# img2img
UpperCAmelCase__ = IFImgaImgPipeline(**pipe_a.components )
UpperCAmelCase__ = IFImgaImgSuperResolutionPipeline(**pipe_a.components )
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
self._test_if_imgaimg(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
pipe_a.remove_all_hooks()
pipe_a.remove_all_hooks()
# inpainting
UpperCAmelCase__ = IFInpaintingPipeline(**pipe_a.components )
UpperCAmelCase__ = IFInpaintingSuperResolutionPipeline(**pipe_a.components )
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
self._test_if_inpainting(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
def UpperCAmelCase ( self : Optional[int] , lowerCamelCase_ : str , lowerCamelCase_ : Optional[int] , lowerCamelCase_ : Optional[Any] , lowerCamelCase_ : List[str] ) ->Union[str, Any]:
'''simple docstring'''
_start_torch_memory_measurement()
UpperCAmelCase__ = torch.Generator(device="""cpu""" ).manual_seed(0 )
UpperCAmelCase__ = pipe_a(
prompt_embeds=lowerCamelCase_ , negative_prompt_embeds=lowerCamelCase_ , num_inference_steps=2 , generator=lowerCamelCase_ , output_type="""np""" , )
UpperCAmelCase__ = output.images[0]
assert image.shape == (64, 64, 3)
UpperCAmelCase__ = torch.cuda.max_memory_allocated()
assert mem_bytes < 13 * 10**9
UpperCAmelCase__ = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if.npy""" )
assert_mean_pixel_difference(lowerCamelCase_ , lowerCamelCase_ )
# pipeline 2
_start_torch_memory_measurement()
UpperCAmelCase__ = torch.Generator(device="""cpu""" ).manual_seed(0 )
UpperCAmelCase__ = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(lowerCamelCase_ )
UpperCAmelCase__ = pipe_a(
prompt_embeds=lowerCamelCase_ , negative_prompt_embeds=lowerCamelCase_ , image=lowerCamelCase_ , generator=lowerCamelCase_ , num_inference_steps=2 , output_type="""np""" , )
UpperCAmelCase__ = output.images[0]
assert image.shape == (256, 256, 3)
UpperCAmelCase__ = torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 10**9
UpperCAmelCase__ = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_superresolution_stage_II.npy""" )
assert_mean_pixel_difference(lowerCamelCase_ , lowerCamelCase_ )
def UpperCAmelCase ( self : Tuple , lowerCamelCase_ : Optional[int] , lowerCamelCase_ : Optional[int] , lowerCamelCase_ : List[Any] , lowerCamelCase_ : Union[str, Any] ) ->Optional[int]:
'''simple docstring'''
_start_torch_memory_measurement()
UpperCAmelCase__ = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(lowerCamelCase_ )
UpperCAmelCase__ = torch.Generator(device="""cpu""" ).manual_seed(0 )
UpperCAmelCase__ = pipe_a(
prompt_embeds=lowerCamelCase_ , negative_prompt_embeds=lowerCamelCase_ , image=lowerCamelCase_ , num_inference_steps=2 , generator=lowerCamelCase_ , output_type="""np""" , )
UpperCAmelCase__ = output.images[0]
assert image.shape == (64, 64, 3)
UpperCAmelCase__ = torch.cuda.max_memory_allocated()
assert mem_bytes < 10 * 10**9
UpperCAmelCase__ = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img.npy""" )
assert_mean_pixel_difference(lowerCamelCase_ , lowerCamelCase_ )
# pipeline 2
_start_torch_memory_measurement()
UpperCAmelCase__ = torch.Generator(device="""cpu""" ).manual_seed(0 )
UpperCAmelCase__ = floats_tensor((1, 3, 256, 256) , rng=random.Random(0 ) ).to(lowerCamelCase_ )
UpperCAmelCase__ = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(lowerCamelCase_ )
UpperCAmelCase__ = pipe_a(
prompt_embeds=lowerCamelCase_ , negative_prompt_embeds=lowerCamelCase_ , image=lowerCamelCase_ , original_image=lowerCamelCase_ , generator=lowerCamelCase_ , num_inference_steps=2 , output_type="""np""" , )
UpperCAmelCase__ = output.images[0]
assert image.shape == (256, 256, 3)
UpperCAmelCase__ = torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 10**9
UpperCAmelCase__ = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img_superresolution_stage_II.npy""" )
assert_mean_pixel_difference(lowerCamelCase_ , lowerCamelCase_ )
def UpperCAmelCase ( self : Any , lowerCamelCase_ : Optional[int] , lowerCamelCase_ : Optional[Any] , lowerCamelCase_ : Optional[Any] , lowerCamelCase_ : List[Any] ) ->Dict:
'''simple docstring'''
_start_torch_memory_measurement()
UpperCAmelCase__ = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(lowerCamelCase_ )
UpperCAmelCase__ = floats_tensor((1, 3, 64, 64) , rng=random.Random(1 ) ).to(lowerCamelCase_ )
UpperCAmelCase__ = torch.Generator(device="""cpu""" ).manual_seed(0 )
UpperCAmelCase__ = pipe_a(
prompt_embeds=lowerCamelCase_ , negative_prompt_embeds=lowerCamelCase_ , image=lowerCamelCase_ , mask_image=lowerCamelCase_ , num_inference_steps=2 , generator=lowerCamelCase_ , output_type="""np""" , )
UpperCAmelCase__ = output.images[0]
assert image.shape == (64, 64, 3)
UpperCAmelCase__ = torch.cuda.max_memory_allocated()
assert mem_bytes < 10 * 10**9
UpperCAmelCase__ = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting.npy""" )
assert_mean_pixel_difference(lowerCamelCase_ , lowerCamelCase_ )
# pipeline 2
_start_torch_memory_measurement()
UpperCAmelCase__ = torch.Generator(device="""cpu""" ).manual_seed(0 )
UpperCAmelCase__ = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(lowerCamelCase_ )
UpperCAmelCase__ = floats_tensor((1, 3, 256, 256) , rng=random.Random(0 ) ).to(lowerCamelCase_ )
UpperCAmelCase__ = floats_tensor((1, 3, 256, 256) , rng=random.Random(1 ) ).to(lowerCamelCase_ )
UpperCAmelCase__ = pipe_a(
prompt_embeds=lowerCamelCase_ , negative_prompt_embeds=lowerCamelCase_ , image=lowerCamelCase_ , mask_image=lowerCamelCase_ , original_image=lowerCamelCase_ , generator=lowerCamelCase_ , num_inference_steps=2 , output_type="""np""" , )
UpperCAmelCase__ = output.images[0]
assert image.shape == (256, 256, 3)
UpperCAmelCase__ = torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 10**9
UpperCAmelCase__ = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting_superresolution_stage_II.npy""" )
assert_mean_pixel_difference(lowerCamelCase_ , lowerCamelCase_ )
def lowercase ( ):
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
| 392
|
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
if is_torch_available():
import torch
from transformers import XLMRobertaModel
@require_sentencepiece
@require_tokenizers
@require_torch
class snake_case ( unittest.TestCase ):
'''simple docstring'''
@slow
def UpperCAmelCase ( self : List[str] ) ->Optional[Any]:
'''simple docstring'''
UpperCAmelCase__ = XLMRobertaModel.from_pretrained("""xlm-roberta-base""" )
UpperCAmelCase__ = torch.tensor([[0, 581, 1_0269, 83, 9_9942, 136, 6_0742, 23, 70, 8_0583, 1_8276, 2]] )
# The dog is cute and lives in the garden house
UpperCAmelCase__ = torch.Size((1, 12, 768) ) # batch_size, sequence_length, embedding_vector_dim
UpperCAmelCase__ = torch.tensor(
[[-0.0101, 0.1218, -0.0803, 0.0801, 0.1327, 0.0776, -0.1215, 0.2383, 0.3338, 0.3106, 0.0300, 0.0252]] )
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base')
# xlmr.eval()
# expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
with torch.no_grad():
UpperCAmelCase__ = model(lowerCamelCase_ )["""last_hidden_state"""].detach()
self.assertEqual(output.shape , lowerCamelCase_ )
# compare the actual values for a slice of last dim
self.assertTrue(torch.allclose(output[:, :, -1] , lowerCamelCase_ , atol=1E-3 ) )
@slow
def UpperCAmelCase ( self : Optional[int] ) ->Tuple:
'''simple docstring'''
UpperCAmelCase__ = XLMRobertaModel.from_pretrained("""xlm-roberta-large""" )
UpperCAmelCase__ = torch.tensor([[0, 581, 1_0269, 83, 9_9942, 136, 6_0742, 23, 70, 8_0583, 1_8276, 2]] )
# The dog is cute and lives in the garden house
UpperCAmelCase__ = torch.Size((1, 12, 1024) ) # batch_size, sequence_length, embedding_vector_dim
UpperCAmelCase__ = torch.tensor(
[[-0.0699, -0.0318, 0.0705, -0.1241, 0.0999, -0.0520, 0.1004, -0.1838, -0.4704, 0.1437, 0.0821, 0.0126]] )
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.large')
# xlmr.eval()
# expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
with torch.no_grad():
UpperCAmelCase__ = model(lowerCamelCase_ )["""last_hidden_state"""].detach()
self.assertEqual(output.shape , lowerCamelCase_ )
# compare the actual values for a slice of last dim
self.assertTrue(torch.allclose(output[:, :, -1] , lowerCamelCase_ , atol=1E-3 ) )
| 392
| 1
|
import importlib
import torch
import yaml
from omegaconf import OmegaConf
from taming.models.vqgan import VQModel
def lowercase__ ( __A: str ,__A: int=False ):
'''simple docstring'''
__magic_name__ : List[Any] = OmegaConf.load(__A )
if display:
print(yaml.dump(OmegaConf.to_container(__A ) ) )
return config
def lowercase__ ( __A: int ,__A: Optional[int]=None ,__A: Union[str, Any]=None ):
'''simple docstring'''
if conf_path is None:
__magic_name__ : Tuple = '''./model_checkpoints/vqgan_only.yaml'''
__magic_name__ : List[str] = load_config(__A ,display=__A )
__magic_name__ : Dict = VQModel(**config.model.params )
if ckpt_path is None:
__magic_name__ : Tuple = '''./model_checkpoints/vqgan_only.pt'''
__magic_name__ : Any = torch.load(__A ,map_location=__A )
if ".ckpt" in ckpt_path:
__magic_name__ : Optional[int] = sd['''state_dict''']
model.load_state_dict(__A ,strict=__A )
model.to(__A )
del sd
return model
def lowercase__ ( __A: Tuple ,__A: str ):
'''simple docstring'''
__magic_name__ , __magic_name__ , __magic_name__ : int = model.encode(__A )
print(F'''VQGAN --- {model.__class__.__name__}: latent shape: {z.shape[2:]}''' )
__magic_name__ : List[Any] = model.decode(__A )
return xrec
def lowercase__ ( __A: Dict ,__A: Union[str, Any]=False ):
'''simple docstring'''
__magic_name__ , __magic_name__ : List[Any] = string.rsplit('''.''' ,1 )
if reload:
__magic_name__ : Dict = importlib.import_module(__A )
importlib.reload(__A )
return getattr(importlib.import_module(__A ,package=__A ) ,cls )
def lowercase__ ( __A: Optional[Any] ):
'''simple docstring'''
if "target" not in config:
raise KeyError('''Expected key `target` to instantiate.''' )
return get_obj_from_str(config['''target'''] )(**config.get('''params''' ,{} ) )
def lowercase__ ( __A: str ,__A: Union[str, Any] ,__A: List[Any]=True ,__A: Optional[int]=True ):
'''simple docstring'''
__magic_name__ : Union[str, Any] = instantiate_from_config(__A )
if sd is not None:
model.load_state_dict(__A )
if gpu:
model.cuda()
if eval_mode:
model.eval()
return {"model": model}
def lowercase__ ( __A: Optional[Any] ,__A: int ,__A: int ,__A: str ):
'''simple docstring'''
if ckpt:
__magic_name__ : str = torch.load(__A ,map_location='''cpu''' )
__magic_name__ : Any = pl_sd['''global_step''']
print(F'''loaded model from global step {global_step}.''' )
else:
__magic_name__ : Any = {'''state_dict''': None}
__magic_name__ : Tuple = None
__magic_name__ : str = load_model_from_config(config.model ,pl_sd['''state_dict'''] ,gpu=__A ,eval_mode=__A )['''model''']
return model, global_step
| 501
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_tf_available,
is_torch_available,
)
__lowerCamelCase : List[Any] = {
'''configuration_speech_to_text''': ['''SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''Speech2TextConfig'''],
'''processing_speech_to_text''': ['''Speech2TextProcessor'''],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase : Any = ['''Speech2TextTokenizer''']
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase : Optional[Any] = ['''Speech2TextFeatureExtractor''']
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase : Any = [
'''TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFSpeech2TextForConditionalGeneration''',
'''TFSpeech2TextModel''',
'''TFSpeech2TextPreTrainedModel''',
]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase : Any = [
'''SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''Speech2TextForConditionalGeneration''',
'''Speech2TextModel''',
'''Speech2TextPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_speech_to_text import SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, SpeechaTextConfig
from .processing_speech_to_text import SpeechaTextProcessor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_speech_to_text import SpeechaTextTokenizer
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_speech_to_text import SpeechaTextFeatureExtractor
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_speech_to_text import (
TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFSpeechaTextForConditionalGeneration,
TFSpeechaTextModel,
TFSpeechaTextPreTrainedModel,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_speech_to_text import (
SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
SpeechaTextForConditionalGeneration,
SpeechaTextModel,
SpeechaTextPreTrainedModel,
)
else:
import sys
__lowerCamelCase : List[str] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 501
| 1
|
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available
from ...utils import OptionalDependencyNotAvailable
_A : str = {"""configuration_gpt_neox""": ["""GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP""", """GPTNeoXConfig"""]}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_A : int = ["""GPTNeoXTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_A : Dict = [
"""GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""GPTNeoXForCausalLM""",
"""GPTNeoXForQuestionAnswering""",
"""GPTNeoXForSequenceClassification""",
"""GPTNeoXForTokenClassification""",
"""GPTNeoXLayer""",
"""GPTNeoXModel""",
"""GPTNeoXPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_gpt_neox import GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXConfig
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_gpt_neox_fast import GPTNeoXTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neox import (
GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoXForCausalLM,
GPTNeoXForQuestionAnswering,
GPTNeoXForSequenceClassification,
GPTNeoXForTokenClassification,
GPTNeoXLayer,
GPTNeoXModel,
GPTNeoXPreTrainedModel,
)
else:
import sys
_A : List[str] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 100
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
__lowercase : List[str] = {"""configuration_van""": ["""VAN_PRETRAINED_CONFIG_ARCHIVE_MAP""", """VanConfig"""]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase : Tuple = [
"""VAN_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""VanForImageClassification""",
"""VanModel""",
"""VanPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_van import VAN_PRETRAINED_CONFIG_ARCHIVE_MAP, VanConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_van import (
VAN_PRETRAINED_MODEL_ARCHIVE_LIST,
VanForImageClassification,
VanModel,
VanPreTrainedModel,
)
else:
import sys
__lowercase : List[str] = _LazyModule(__name__, globals()["""__file__"""], _import_structure)
| 142
| 0
|
"""simple docstring"""
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import CLIPSegProcessor, ViTImageProcessor
@require_vision
class __UpperCAmelCase( unittest.TestCase ):
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : Optional[Any]= tempfile.mkdtemp()
# fmt: off
lowercase__ : Optional[Any]= ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
# fmt: on
lowercase__ : Any= dict(zip(snake_case__ , range(len(snake_case__ ) ) ) )
lowercase__ : str= ["#version: 0.2", "l o", "lo w</w>", "e r</w>", ""]
lowercase__ : Union[str, Any]= {"unk_token": "<unk>"}
lowercase__ : str= os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
lowercase__ : Optional[int]= os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
fp.write(json.dumps(snake_case__ ) + "\n" )
with open(self.merges_file , "w" , encoding="utf-8" ) as fp:
fp.write("\n".join(snake_case__ ) )
lowercase__ : List[str]= {
"do_resize": True,
"size": 20,
"do_center_crop": True,
"crop_size": 18,
"do_normalize": True,
"image_mean": [0.48_14_54_66, 0.4_57_82_75, 0.40_82_10_73],
"image_std": [0.26_86_29_54, 0.26_13_02_58, 0.27_57_77_11],
}
lowercase__ : Optional[int]= os.path.join(self.tmpdirname , snake_case__ )
with open(self.image_processor_file , "w" , encoding="utf-8" ) as fp:
json.dump(snake_case__ , snake_case__ )
def UpperCAmelCase_ ( self , **snake_case__ ):
'''simple docstring'''
return CLIPTokenizer.from_pretrained(self.tmpdirname , **snake_case__ )
def UpperCAmelCase_ ( self , **snake_case__ ):
'''simple docstring'''
return CLIPTokenizerFast.from_pretrained(self.tmpdirname , **snake_case__ )
def UpperCAmelCase_ ( self , **snake_case__ ):
'''simple docstring'''
return ViTImageProcessor.from_pretrained(self.tmpdirname , **snake_case__ )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
shutil.rmtree(self.tmpdirname )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : Dict= [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
lowercase__ : List[str]= [Image.fromarray(np.moveaxis(snake_case__ , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : Any= self.get_tokenizer()
lowercase__ : Dict= self.get_rust_tokenizer()
lowercase__ : List[str]= self.get_image_processor()
lowercase__ : int= CLIPSegProcessor(tokenizer=snake_case__ , image_processor=snake_case__ )
processor_slow.save_pretrained(self.tmpdirname )
lowercase__ : Union[str, Any]= CLIPSegProcessor.from_pretrained(self.tmpdirname , use_fast=snake_case__ )
lowercase__ : Union[str, Any]= CLIPSegProcessor(tokenizer=snake_case__ , image_processor=snake_case__ )
processor_fast.save_pretrained(self.tmpdirname )
lowercase__ : List[Any]= CLIPSegProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , snake_case__ )
self.assertIsInstance(processor_fast.tokenizer , snake_case__ )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , snake_case__ )
self.assertIsInstance(processor_fast.image_processor , snake_case__ )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : str= CLIPSegProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
lowercase__ : Optional[int]= self.get_tokenizer(bos_token="(BOS)" , eos_token="(EOS)" )
lowercase__ : Union[str, Any]= self.get_image_processor(do_normalize=snake_case__ , padding_value=1.0 )
lowercase__ : List[str]= CLIPSegProcessor.from_pretrained(
self.tmpdirname , bos_token="(BOS)" , eos_token="(EOS)" , do_normalize=snake_case__ , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , snake_case__ )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , snake_case__ )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : str= self.get_image_processor()
lowercase__ : Dict= self.get_tokenizer()
lowercase__ : int= CLIPSegProcessor(tokenizer=snake_case__ , image_processor=snake_case__ )
lowercase__ : Tuple= self.prepare_image_inputs()
lowercase__ : Union[str, Any]= image_processor(snake_case__ , return_tensors="np" )
lowercase__ : Any= processor(images=snake_case__ , return_tensors="np" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : Optional[Any]= self.get_image_processor()
lowercase__ : str= self.get_tokenizer()
lowercase__ : List[str]= CLIPSegProcessor(tokenizer=snake_case__ , image_processor=snake_case__ )
lowercase__ : Tuple= "lower newer"
lowercase__ : Union[str, Any]= processor(text=snake_case__ )
lowercase__ : List[Any]= tokenizer(snake_case__ )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : Tuple= self.get_image_processor()
lowercase__ : Optional[int]= self.get_tokenizer()
lowercase__ : Tuple= CLIPSegProcessor(tokenizer=snake_case__ , image_processor=snake_case__ )
lowercase__ : Any= "lower newer"
lowercase__ : Any= self.prepare_image_inputs()
lowercase__ : List[str]= processor(text=snake_case__ , images=snake_case__ )
self.assertListEqual(list(inputs.keys() ) , ["input_ids", "attention_mask", "pixel_values"] )
# test if it raises when no input is passed
with pytest.raises(snake_case__ ):
processor()
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : Optional[int]= self.get_image_processor()
lowercase__ : List[Any]= self.get_tokenizer()
lowercase__ : Optional[Any]= CLIPSegProcessor(tokenizer=snake_case__ , image_processor=snake_case__ )
lowercase__ : Any= self.prepare_image_inputs()
lowercase__ : List[Any]= self.prepare_image_inputs()
lowercase__ : Dict= processor(images=snake_case__ , visual_prompt=snake_case__ )
self.assertListEqual(list(inputs.keys() ) , ["pixel_values", "conditional_pixel_values"] )
# test if it raises when no input is passed
with pytest.raises(snake_case__ ):
processor()
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : int= self.get_image_processor()
lowercase__ : List[Any]= self.get_tokenizer()
lowercase__ : Any= CLIPSegProcessor(tokenizer=snake_case__ , image_processor=snake_case__ )
lowercase__ : List[Any]= [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
lowercase__ : List[Any]= processor.batch_decode(snake_case__ )
lowercase__ : Optional[int]= tokenizer.batch_decode(snake_case__ )
self.assertListEqual(snake_case__ , snake_case__ )
| 704
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import _LazyModule
a : List[str] = {"""processing_wav2vec2_with_lm""": ["""Wav2Vec2ProcessorWithLM"""]}
if TYPE_CHECKING:
from .processing_wavaveca_with_lm import WavaVecaProcessorWithLM
else:
import sys
a : Dict = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 85
| 0
|
import torch
from diffusers import DiffusionPipeline
class lowerCamelCase_ ( _lowercase ):
def __init__( self : Optional[int] , __A : Optional[Any] , __A : Dict ):
super().__init__()
self.register_modules(unet=__A , scheduler=__A )
def __call__( self : List[Any] ):
__A : Optional[Any] = torch.randn(
(1, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size) , )
__A : List[str] = 1
__A : Union[str, Any] = self.unet(__A , __A ).sample
__A : Union[str, Any] = self.scheduler.step(__A , __A , __A ).prev_sample
__A : int = scheduler_output - scheduler_output + torch.ones_like(__A )
return result
| 17
|
from __future__ import annotations
import unittest
from transformers import FunnelConfig, is_tf_available
from transformers.testing_utils import require_tf
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFFunnelBaseModel,
TFFunnelForMaskedLM,
TFFunnelForMultipleChoice,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForSequenceClassification,
TFFunnelForTokenClassification,
TFFunnelModel,
)
class __lowercase :
"""simple docstring"""
def __init__( self , __UpperCAmelCase , __UpperCAmelCase=13 , __UpperCAmelCase=7 , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=99 , __UpperCAmelCase=[1, 1, 2] , __UpperCAmelCase=1 , __UpperCAmelCase=32 , __UpperCAmelCase=4 , __UpperCAmelCase=8 , __UpperCAmelCase=37 , __UpperCAmelCase="gelu_new" , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.0 , __UpperCAmelCase=5_12 , __UpperCAmelCase=3 , __UpperCAmelCase=0.0_2 , __UpperCAmelCase=3 , __UpperCAmelCase=4 , __UpperCAmelCase=None , __UpperCAmelCase=False , ) -> Tuple:
A : Optional[int] = parent
A : int = batch_size
A : Union[str, Any] = seq_length
A : Optional[Any] = is_training
A : int = use_input_mask
A : List[Any] = use_token_type_ids
A : Optional[int] = use_labels
A : List[Any] = vocab_size
A : int = block_sizes
A : Optional[int] = num_decoder_layers
A : int = d_model
A : Dict = n_head
A : Tuple = d_head
A : int = d_inner
A : Tuple = hidden_act
A : Tuple = hidden_dropout
A : Any = attention_dropout
A : Any = activation_dropout
A : List[Any] = max_position_embeddings
A : int = type_vocab_size
A : Dict = 2
A : List[str] = num_labels
A : Optional[int] = num_choices
A : int = scope
A : int = initializer_std
# Used in the tests to check the size of the first attention layer
A : Optional[int] = n_head
# Used in the tests to check the size of the first hidden state
A : List[Any] = self.d_model
# Used in the tests to check the number of output hidden states/attentions
A : Dict = sum(self.block_sizes ) + (0 if base else self.num_decoder_layers)
# FunnelModel adds two hidden layers: input embeddings and the sum of the upsampled encoder hidden state with
# the last hidden state of the first block (which is the first hidden state of the decoder).
if not base:
A : Union[str, Any] = self.num_hidden_layers + 2
def snake_case ( self ) -> List[Any]:
A : int = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
A : Any = None
if self.use_input_mask:
A : List[Any] = random_attention_mask([self.batch_size, self.seq_length] )
A : List[str] = None
if self.use_token_type_ids:
A : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
A : Any = None
A : Union[str, Any] = None
A : Optional[Any] = None
if self.use_labels:
A : Tuple = ids_tensor([self.batch_size] , self.type_sequence_label_size )
A : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
A : List[Any] = ids_tensor([self.batch_size] , self.num_choices )
A : List[str] = FunnelConfig(
vocab_size=self.vocab_size , block_sizes=self.block_sizes , num_decoder_layers=self.num_decoder_layers , d_model=self.d_model , n_head=self.n_head , d_head=self.d_head , d_inner=self.d_inner , hidden_act=self.hidden_act , hidden_dropout=self.hidden_dropout , attention_dropout=self.attention_dropout , activation_dropout=self.activation_dropout , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_std=self.initializer_std , )
return (
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
)
def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ) -> Optional[int]:
A : str = TFFunnelModel(config=__UpperCAmelCase )
A : str = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
A : Tuple = model(__UpperCAmelCase )
A : str = [input_ids, input_mask]
A : Optional[Any] = model(__UpperCAmelCase )
A : List[str] = model(__UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model) )
A : str = False
A : List[str] = TFFunnelModel(config=__UpperCAmelCase )
A : str = model(__UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model) )
A : Union[str, Any] = False
A : Tuple = TFFunnelModel(config=__UpperCAmelCase )
A : Any = model(__UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model) )
def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ) -> Union[str, Any]:
A : Dict = TFFunnelBaseModel(config=__UpperCAmelCase )
A : Optional[int] = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
A : List[str] = model(__UpperCAmelCase )
A : Union[str, Any] = [input_ids, input_mask]
A : Optional[Any] = model(__UpperCAmelCase )
A : Union[str, Any] = model(__UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 2, self.d_model) )
A : List[Any] = False
A : Union[str, Any] = TFFunnelBaseModel(config=__UpperCAmelCase )
A : Union[str, Any] = model(__UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 3, self.d_model) )
A : int = False
A : Tuple = TFFunnelBaseModel(config=__UpperCAmelCase )
A : Tuple = model(__UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 2, self.d_model) )
def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ) -> List[Any]:
A : List[str] = TFFunnelForPreTraining(config=__UpperCAmelCase )
A : Any = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
A : Optional[Any] = model(__UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length) )
def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ) -> List[Any]:
A : str = TFFunnelForMaskedLM(config=__UpperCAmelCase )
A : int = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
A : str = model(__UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ) -> Union[str, Any]:
A : Tuple = self.num_labels
A : List[Any] = TFFunnelForSequenceClassification(config=__UpperCAmelCase )
A : List[str] = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
A : Any = model(__UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ) -> Dict:
A : Union[str, Any] = self.num_choices
A : Tuple = TFFunnelForMultipleChoice(config=__UpperCAmelCase )
A : Tuple = tf.tile(tf.expand_dims(__UpperCAmelCase , 1 ) , (1, self.num_choices, 1) )
A : List[Any] = tf.tile(tf.expand_dims(__UpperCAmelCase , 1 ) , (1, self.num_choices, 1) )
A : Any = tf.tile(tf.expand_dims(__UpperCAmelCase , 1 ) , (1, self.num_choices, 1) )
A : str = {
'''input_ids''': multiple_choice_inputs_ids,
'''attention_mask''': multiple_choice_input_mask,
'''token_type_ids''': multiple_choice_token_type_ids,
}
A : Any = model(__UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ) -> str:
A : Optional[Any] = self.num_labels
A : List[str] = TFFunnelForTokenClassification(config=__UpperCAmelCase )
A : int = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
A : Optional[Any] = model(__UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ) -> Any:
A : Dict = TFFunnelForQuestionAnswering(config=__UpperCAmelCase )
A : str = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
A : List[Any] = model(__UpperCAmelCase )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def snake_case ( self ) -> str:
A : Union[str, Any] = self.prepare_config_and_inputs()
(
(
A
) , (
A
) , (
A
) , (
A
) , (
A
) , (
A
) , (
A
) ,
) : Optional[Any] = config_and_inputs
A : Union[str, Any] = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_tf
class __lowercase ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase_ : Any = (
(
TFFunnelModel,
TFFunnelForMaskedLM,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForTokenClassification,
)
if is_tf_available()
else ()
)
UpperCAmelCase_ : Optional[Any] = (
{
'''feature-extraction''': (TFFunnelBaseModel, TFFunnelModel),
'''fill-mask''': TFFunnelForMaskedLM,
'''question-answering''': TFFunnelForQuestionAnswering,
'''text-classification''': TFFunnelForSequenceClassification,
'''token-classification''': TFFunnelForTokenClassification,
'''zero-shot''': TFFunnelForSequenceClassification,
}
if is_tf_available()
else {}
)
UpperCAmelCase_ : Optional[Any] = False
UpperCAmelCase_ : Optional[Any] = False
def snake_case ( self ) -> str:
A : Dict = TFFunnelModelTester(self )
A : int = ConfigTester(self , config_class=__UpperCAmelCase )
def snake_case ( self ) -> Tuple:
self.config_tester.run_common_tests()
def snake_case ( self ) -> List[str]:
A : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__UpperCAmelCase )
def snake_case ( self ) -> Union[str, Any]:
A : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*__UpperCAmelCase )
def snake_case ( self ) -> Optional[Any]:
A : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*__UpperCAmelCase )
def snake_case ( self ) -> Optional[Any]:
A : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*__UpperCAmelCase )
def snake_case ( self ) -> Optional[int]:
A : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*__UpperCAmelCase )
@require_tf
class __lowercase ( _SCREAMING_SNAKE_CASE , unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase_ : Any = (
(TFFunnelBaseModel, TFFunnelForMultipleChoice, TFFunnelForSequenceClassification) if is_tf_available() else ()
)
UpperCAmelCase_ : str = False
UpperCAmelCase_ : Union[str, Any] = False
def snake_case ( self ) -> int:
A : Optional[int] = TFFunnelModelTester(self , base=__UpperCAmelCase )
A : Optional[Any] = ConfigTester(self , config_class=__UpperCAmelCase )
def snake_case ( self ) -> str:
self.config_tester.run_common_tests()
def snake_case ( self ) -> Tuple:
A : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_base_model(*__UpperCAmelCase )
def snake_case ( self ) -> int:
A : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*__UpperCAmelCase )
def snake_case ( self ) -> Tuple:
A : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*__UpperCAmelCase )
| 542
| 0
|
from __future__ import annotations
from decimal import Decimal
from numpy import array
def __lowercase ( _UpperCAmelCase ) -> list[list[float]]:
'''simple docstring'''
__lowercase = Decimal
# Check if the provided matrix has 2 rows and 2 columns
# since this implementation only works for 2x2 matrices
if len(_UpperCAmelCase ) == 2 and len(matrix[0] ) == 2 and len(matrix[1] ) == 2:
# Calculate the determinant of the matrix
__lowercase = float(
d(matrix[0][0] ) * d(matrix[1][1] ) - d(matrix[1][0] ) * d(matrix[0][1] ) )
if determinant == 0:
raise ValueError("This matrix has no inverse." )
# Creates a copy of the matrix with swapped positions of the elements
__lowercase = [[0.0, 0.0], [0.0, 0.0]]
__lowercase , __lowercase = matrix[1][1], matrix[0][0]
__lowercase , __lowercase = -matrix[1][0], -matrix[0][1]
# Calculate the inverse of the matrix
return [
[(float(d(_UpperCAmelCase ) ) / determinant) or 0.0 for n in row] for row in swapped_matrix
]
elif (
len(_UpperCAmelCase ) == 3
and len(matrix[0] ) == 3
and len(matrix[1] ) == 3
and len(matrix[2] ) == 3
):
# Calculate the determinant of the matrix using Sarrus rule
__lowercase = float(
(
(d(matrix[0][0] ) * d(matrix[1][1] ) * d(matrix[2][2] ))
+ (d(matrix[0][1] ) * d(matrix[1][2] ) * d(matrix[2][0] ))
+ (d(matrix[0][2] ) * d(matrix[1][0] ) * d(matrix[2][1] ))
)
- (
(d(matrix[0][2] ) * d(matrix[1][1] ) * d(matrix[2][0] ))
+ (d(matrix[0][1] ) * d(matrix[1][0] ) * d(matrix[2][2] ))
+ (d(matrix[0][0] ) * d(matrix[1][2] ) * d(matrix[2][1] ))
) )
if determinant == 0:
raise ValueError("This matrix has no inverse." )
# Creating cofactor matrix
__lowercase = [
[d(0.0 ), d(0.0 ), d(0.0 )],
[d(0.0 ), d(0.0 ), d(0.0 )],
[d(0.0 ), d(0.0 ), d(0.0 )],
]
__lowercase = (d(matrix[1][1] ) * d(matrix[2][2] )) - (
d(matrix[1][2] ) * d(matrix[2][1] )
)
__lowercase = -(
(d(matrix[1][0] ) * d(matrix[2][2] )) - (d(matrix[1][2] ) * d(matrix[2][0] ))
)
__lowercase = (d(matrix[1][0] ) * d(matrix[2][1] )) - (
d(matrix[1][1] ) * d(matrix[2][0] )
)
__lowercase = -(
(d(matrix[0][1] ) * d(matrix[2][2] )) - (d(matrix[0][2] ) * d(matrix[2][1] ))
)
__lowercase = (d(matrix[0][0] ) * d(matrix[2][2] )) - (
d(matrix[0][2] ) * d(matrix[2][0] )
)
__lowercase = -(
(d(matrix[0][0] ) * d(matrix[2][1] )) - (d(matrix[0][1] ) * d(matrix[2][0] ))
)
__lowercase = (d(matrix[0][1] ) * d(matrix[1][2] )) - (
d(matrix[0][2] ) * d(matrix[1][1] )
)
__lowercase = -(
(d(matrix[0][0] ) * d(matrix[1][2] )) - (d(matrix[0][2] ) * d(matrix[1][0] ))
)
__lowercase = (d(matrix[0][0] ) * d(matrix[1][1] )) - (
d(matrix[0][1] ) * d(matrix[1][0] )
)
# Transpose the cofactor matrix (Adjoint matrix)
__lowercase = array(_UpperCAmelCase )
for i in range(3 ):
for j in range(3 ):
__lowercase = cofactor_matrix[j][i]
# Inverse of the matrix using the formula (1/determinant) * adjoint matrix
__lowercase = array(_UpperCAmelCase )
for i in range(3 ):
for j in range(3 ):
inverse_matrix[i][j] /= d(_UpperCAmelCase )
# Calculate the inverse of the matrix
return [[float(d(_UpperCAmelCase ) ) or 0.0 for n in row] for row in inverse_matrix]
raise ValueError("Please provide a matrix of size 2x2 or 3x3." )
| 576
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
lowerCAmelCase__ = {'configuration_xglm': ['XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP', 'XGLMConfig']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = ['XGLMTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = ['XGLMTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = [
'XGLM_PRETRAINED_MODEL_ARCHIVE_LIST',
'XGLMForCausalLM',
'XGLMModel',
'XGLMPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = [
'FlaxXGLMForCausalLM',
'FlaxXGLMModel',
'FlaxXGLMPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = [
'TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFXGLMForCausalLM',
'TFXGLMModel',
'TFXGLMPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_xglm import XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XGLMConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm import XGLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm_fast import XGLMTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xglm import XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, XGLMForCausalLM, XGLMModel, XGLMPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_xglm import FlaxXGLMForCausalLM, FlaxXGLMModel, FlaxXGLMPreTrainedModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
TFXGLMPreTrainedModel,
)
else:
import sys
lowerCAmelCase__ = _LazyModule(__name__, globals()['__file__'], _import_structure)
| 576
| 1
|
from maths.prime_factors import prime_factors
def SCREAMING_SNAKE_CASE_ ( _snake_case :int ) -> int:
if not isinstance(_snake_case , _snake_case ):
_A = F'''Input value of [number={number}] must be an integer'''
raise TypeError(_snake_case )
if number < 1:
raise ValueError('''Input must be a positive integer''' )
return -1 if len(prime_factors(_snake_case ) ) % 2 else 1
if __name__ == "__main__":
import doctest
doctest.testmod()
| 2
|
from __future__ import annotations
from sys import maxsize
from typing import Generic, TypeVar
UpperCAmelCase_ = TypeVar("""T""")
def SCREAMING_SNAKE_CASE_ ( _snake_case :int ) -> int:
return (position - 1) // 2
def SCREAMING_SNAKE_CASE_ ( _snake_case :int ) -> int:
return (2 * position) + 1
def SCREAMING_SNAKE_CASE_ ( _snake_case :int ) -> int:
return (2 * position) + 2
class lowerCamelCase__ ( Generic[T]):
"""simple docstring"""
def __init__( self : Optional[int] ) -> None:
_A = []
_A = {}
_A = 0
def __len__( self : str ) -> int:
return self.elements
def __repr__( self : Optional[int] ) -> str:
return str(self.heap )
def snake_case_ ( self : str ) -> bool:
# Check if the priority queue is empty
return self.elements == 0
def snake_case_ ( self : Optional[int] , __lowerCAmelCase : T , __lowerCAmelCase : int ) -> None:
# Add an element with given priority to the queue
self.heap.append((elem, weight) )
_A = self.elements
self.elements += 1
self._bubble_up(__lowerCAmelCase )
def snake_case_ ( self : Tuple ) -> T:
# Remove and return the element with lowest weight (highest priority)
if self.elements > 1:
self._swap_nodes(0 , self.elements - 1 )
_A , _A = self.heap.pop()
del self.position_map[elem]
self.elements -= 1
if self.elements > 0:
_A , _A = self.heap[0]
self._bubble_down(__lowerCAmelCase )
return elem
def snake_case_ ( self : int , __lowerCAmelCase : T , __lowerCAmelCase : int ) -> None:
# Update the weight of the given key
_A = self.position_map[elem]
_A = (elem, weight)
if position > 0:
_A = get_parent_position(__lowerCAmelCase )
_A , _A = self.heap[parent_position]
if parent_weight > weight:
self._bubble_up(__lowerCAmelCase )
else:
self._bubble_down(__lowerCAmelCase )
else:
self._bubble_down(__lowerCAmelCase )
def snake_case_ ( self : Optional[Any] , __lowerCAmelCase : T ) -> None:
# Place a node at the proper position (upward movement) [to be used internally
# only]
_A = self.position_map[elem]
if curr_pos == 0:
return None
_A = get_parent_position(__lowerCAmelCase )
_A , _A = self.heap[curr_pos]
_A , _A = self.heap[parent_position]
if parent_weight > weight:
self._swap_nodes(__lowerCAmelCase , __lowerCAmelCase )
return self._bubble_up(__lowerCAmelCase )
return None
def snake_case_ ( self : Dict , __lowerCAmelCase : T ) -> None:
# Place a node at the proper position (downward movement) [to be used
# internally only]
_A = self.position_map[elem]
_A , _A = self.heap[curr_pos]
_A = get_child_left_position(__lowerCAmelCase )
_A = get_child_right_position(__lowerCAmelCase )
if child_left_position < self.elements and child_right_position < self.elements:
_A , _A = self.heap[child_left_position]
_A , _A = self.heap[child_right_position]
if child_right_weight < child_left_weight and child_right_weight < weight:
self._swap_nodes(__lowerCAmelCase , __lowerCAmelCase )
return self._bubble_down(__lowerCAmelCase )
if child_left_position < self.elements:
_A , _A = self.heap[child_left_position]
if child_left_weight < weight:
self._swap_nodes(__lowerCAmelCase , __lowerCAmelCase )
return self._bubble_down(__lowerCAmelCase )
else:
return None
if child_right_position < self.elements:
_A , _A = self.heap[child_right_position]
if child_right_weight < weight:
self._swap_nodes(__lowerCAmelCase , __lowerCAmelCase )
return self._bubble_down(__lowerCAmelCase )
return None
def snake_case_ ( self : List[str] , __lowerCAmelCase : int , __lowerCAmelCase : int ) -> None:
# Swap the nodes at the given positions
_A = self.heap[nodea_pos][0]
_A = self.heap[nodea_pos][0]
_A , _A = (
self.heap[nodea_pos],
self.heap[nodea_pos],
)
_A = nodea_pos
_A = nodea_pos
class lowerCamelCase__ ( Generic[T]):
"""simple docstring"""
def __init__( self : str ) -> None:
_A = {}
_A = 0
def __repr__( self : str ) -> str:
return str(self.connections )
def __len__( self : Dict ) -> int:
return self.nodes
def snake_case_ ( self : Any , __lowerCAmelCase : T ) -> None:
# Add a node in the graph if it is not in the graph
if node not in self.connections:
_A = {}
self.nodes += 1
def snake_case_ ( self : str , __lowerCAmelCase : T , __lowerCAmelCase : T , __lowerCAmelCase : int ) -> None:
# Add an edge between 2 nodes in the graph
self.add_node(__lowerCAmelCase )
self.add_node(__lowerCAmelCase )
_A = weight
_A = weight
def SCREAMING_SNAKE_CASE_ ( _snake_case :GraphUndirectedWeighted[T] , ) -> tuple[dict[T, int], dict[T, T | None]]:
_A = {node: maxsize for node in graph.connections}
_A = {node: None for node in graph.connections}
_A = MinPriorityQueue()
for node, weight in dist.items():
priority_queue.push(_snake_case , _snake_case )
if priority_queue.is_empty():
return dist, parent
# initialization
_A = priority_queue.extract_min()
_A = 0
for neighbour in graph.connections[node]:
if dist[neighbour] > dist[node] + graph.connections[node][neighbour]:
_A = dist[node] + graph.connections[node][neighbour]
priority_queue.update_key(_snake_case , dist[neighbour] )
_A = node
# running prim's algorithm
while not priority_queue.is_empty():
_A = priority_queue.extract_min()
for neighbour in graph.connections[node]:
if dist[neighbour] > dist[node] + graph.connections[node][neighbour]:
_A = dist[node] + graph.connections[node][neighbour]
priority_queue.update_key(_snake_case , dist[neighbour] )
_A = node
return dist, parent
| 2
| 1
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a__: Optional[Any] = logging.get_logger(__name__)
a__: Tuple = {}
class SCREAMING_SNAKE_CASE__ ( UpperCamelCase__ ):
__SCREAMING_SNAKE_CASE = '''llama'''
__SCREAMING_SNAKE_CASE = ['''past_key_values''']
def __init__( self,__lowerCamelCase=3_2000,__lowerCamelCase=4096,__lowerCamelCase=1_1008,__lowerCamelCase=32,__lowerCamelCase=32,__lowerCamelCase=None,__lowerCamelCase="silu",__lowerCamelCase=2048,__lowerCamelCase=0.02,__lowerCamelCase=1E-6,__lowerCamelCase=True,__lowerCamelCase=0,__lowerCamelCase=1,__lowerCamelCase=2,__lowerCamelCase=1,__lowerCamelCase=False,__lowerCamelCase=None,**__lowerCamelCase,):
A__ = vocab_size
A__ = max_position_embeddings
A__ = hidden_size
A__ = intermediate_size
A__ = num_hidden_layers
A__ = num_attention_heads
# for backward compatibility
if num_key_value_heads is None:
A__ = num_attention_heads
A__ = num_key_value_heads
A__ = hidden_act
A__ = initializer_range
A__ = rms_norm_eps
A__ = pretraining_tp
A__ = use_cache
A__ = rope_scaling
self._rope_scaling_validation()
super().__init__(
pad_token_id=__lowerCamelCase,bos_token_id=__lowerCamelCase,eos_token_id=__lowerCamelCase,tie_word_embeddings=__lowerCamelCase,**__lowerCamelCase,)
def UpperCamelCase ( self ):
if self.rope_scaling is None:
return
if not isinstance(self.rope_scaling,__lowerCamelCase ) or len(self.rope_scaling ) != 2:
raise ValueError(
'''`rope_scaling` must be a dictionary with with two fields, `name` and `factor`, '''
f"got {self.rope_scaling}" )
A__ = self.rope_scaling.get('''type''',__lowerCamelCase )
A__ = self.rope_scaling.get('''factor''',__lowerCamelCase )
if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
raise ValueError(
f"`rope_scaling`'s name field must be one of ['linear', 'dynamic'], got {rope_scaling_type}" )
if rope_scaling_factor is None or not isinstance(__lowerCamelCase,__lowerCamelCase ) or rope_scaling_factor <= 1.0:
raise ValueError(f"`rope_scaling`'s factor field must be an float > 1, got {rope_scaling_factor}" )
| 212
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a__: Any = logging.get_logger(__name__)
a__: List[str] = {
'microsoft/trocr-base-handwritten': (
'https://huggingface.co/microsoft/trocr-base-handwritten/resolve/main/config.json'
),
# See all TrOCR models at https://huggingface.co/models?filter=trocr
}
class SCREAMING_SNAKE_CASE__ ( UpperCamelCase__ ):
__SCREAMING_SNAKE_CASE = '''trocr'''
__SCREAMING_SNAKE_CASE = ['''past_key_values''']
__SCREAMING_SNAKE_CASE = {
'''num_attention_heads''': '''decoder_attention_heads''',
'''hidden_size''': '''d_model''',
'''num_hidden_layers''': '''decoder_layers''',
}
def __init__( self,__lowerCamelCase=5_0265,__lowerCamelCase=1024,__lowerCamelCase=12,__lowerCamelCase=16,__lowerCamelCase=4096,__lowerCamelCase="gelu",__lowerCamelCase=512,__lowerCamelCase=0.1,__lowerCamelCase=0.0,__lowerCamelCase=0.0,__lowerCamelCase=2,__lowerCamelCase=0.02,__lowerCamelCase=0.0,__lowerCamelCase=True,__lowerCamelCase=False,__lowerCamelCase=True,__lowerCamelCase=True,__lowerCamelCase=1,__lowerCamelCase=0,__lowerCamelCase=2,**__lowerCamelCase,):
A__ = vocab_size
A__ = d_model
A__ = decoder_layers
A__ = decoder_attention_heads
A__ = decoder_ffn_dim
A__ = activation_function
A__ = max_position_embeddings
A__ = dropout
A__ = attention_dropout
A__ = activation_dropout
A__ = init_std
A__ = decoder_layerdrop
A__ = use_cache
A__ = scale_embedding
A__ = use_learned_position_embeddings
A__ = layernorm_embedding
super().__init__(
pad_token_id=__lowerCamelCase,bos_token_id=__lowerCamelCase,eos_token_id=__lowerCamelCase,decoder_start_token_id=__lowerCamelCase,**__lowerCamelCase,)
| 212
| 1
|
"""simple docstring"""
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
from ...utils import logging
from ..auto import CONFIG_MAPPING
__lowerCAmelCase : str = logging.get_logger(__name__)
__lowerCAmelCase : Optional[int] = {
'''salesforce/blip2-opt-2.7b''': '''https://huggingface.co/salesforce/blip2-opt-2.7b/resolve/main/config.json''',
}
class _lowerCAmelCase ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_lowerCamelCase = '''blip_2_vision_model'''
def __init__( self , _lowercase=1_4_0_8 , _lowercase=6_1_4_4 , _lowercase=3_9 , _lowercase=1_6 , _lowercase=2_2_4 , _lowercase=1_4 , _lowercase="gelu" , _lowercase=0.0_0001 , _lowercase=0.0 , _lowercase=1E-10 , _lowercase=True , **_lowercase , ) -> Any:
'''simple docstring'''
super().__init__(**_lowercase )
snake_case_ : int = hidden_size
snake_case_ : Any = intermediate_size
snake_case_ : Any = num_hidden_layers
snake_case_ : Union[str, Any] = num_attention_heads
snake_case_ : Any = patch_size
snake_case_ : Dict = image_size
snake_case_ : Union[str, Any] = initializer_range
snake_case_ : Union[str, Any] = attention_dropout
snake_case_ : Optional[Any] = layer_norm_eps
snake_case_ : int = hidden_act
snake_case_ : Optional[Any] = qkv_bias
@classmethod
def UpperCAmelCase__ ( cls , _lowercase , **_lowercase ) -> "PretrainedConfig":
'''simple docstring'''
cls._set_token_in_kwargs(_lowercase )
snake_case_ , snake_case_ : Tuple = cls.get_config_dict(_lowercase , **_lowercase )
# get the vision config dict if we are loading from Blip2Config
if config_dict.get("""model_type""" ) == "blip-2":
snake_case_ : str = config_dict["""vision_config"""]
if "model_type" in config_dict and hasattr(cls , """model_type""" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
f'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' )
return cls.from_dict(_lowercase , **_lowercase )
class _lowerCAmelCase ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_lowerCamelCase = '''blip_2_qformer'''
def __init__( self , _lowercase=3_0_5_2_2 , _lowercase=7_6_8 , _lowercase=1_2 , _lowercase=1_2 , _lowercase=3_0_7_2 , _lowercase="gelu" , _lowercase=0.1 , _lowercase=0.1 , _lowercase=5_1_2 , _lowercase=0.02 , _lowercase=1E-12 , _lowercase=0 , _lowercase="absolute" , _lowercase=2 , _lowercase=1_4_0_8 , **_lowercase , ) -> Optional[int]:
'''simple docstring'''
super().__init__(pad_token_id=_lowercase , **_lowercase )
snake_case_ : Optional[Any] = vocab_size
snake_case_ : Optional[int] = hidden_size
snake_case_ : Union[str, Any] = num_hidden_layers
snake_case_ : Any = num_attention_heads
snake_case_ : str = hidden_act
snake_case_ : int = intermediate_size
snake_case_ : Tuple = hidden_dropout_prob
snake_case_ : Any = attention_probs_dropout_prob
snake_case_ : Dict = max_position_embeddings
snake_case_ : List[str] = initializer_range
snake_case_ : Union[str, Any] = layer_norm_eps
snake_case_ : str = position_embedding_type
snake_case_ : List[str] = cross_attention_frequency
snake_case_ : Union[str, Any] = encoder_hidden_size
@classmethod
def UpperCAmelCase__ ( cls , _lowercase , **_lowercase ) -> "PretrainedConfig":
'''simple docstring'''
cls._set_token_in_kwargs(_lowercase )
snake_case_ , snake_case_ : Optional[int] = cls.get_config_dict(_lowercase , **_lowercase )
# get the qformer config dict if we are loading from Blip2Config
if config_dict.get("""model_type""" ) == "blip-2":
snake_case_ : str = config_dict["""qformer_config"""]
if "model_type" in config_dict and hasattr(cls , """model_type""" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
f'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' )
return cls.from_dict(_lowercase , **_lowercase )
class _lowerCAmelCase ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_lowerCamelCase = '''blip-2'''
_lowerCamelCase = True
def __init__( self , _lowercase=None , _lowercase=None , _lowercase=None , _lowercase=3_2 , **_lowercase ) -> Any:
'''simple docstring'''
super().__init__(**_lowercase )
if vision_config is None:
snake_case_ : Optional[int] = {}
logger.info("""vision_config is None. initializing the Blip2VisionConfig with default values.""" )
if qformer_config is None:
snake_case_ : Union[str, Any] = {}
logger.info("""qformer_config is None. Initializing the Blip2QFormerConfig with default values.""" )
if text_config is None:
snake_case_ : Tuple = {}
logger.info("""text_config is None. Initializing the text config with default values (`OPTConfig`).""" )
snake_case_ : Tuple = BlipaVisionConfig(**_lowercase )
snake_case_ : Union[str, Any] = BlipaQFormerConfig(**_lowercase )
snake_case_ : Union[str, Any] = text_config["""model_type"""] if """model_type""" in text_config else """opt"""
snake_case_ : Optional[int] = CONFIG_MAPPING[text_model_type](**_lowercase )
snake_case_ : Optional[int] = self.text_config.tie_word_embeddings
snake_case_ : Dict = self.text_config.is_encoder_decoder
snake_case_ : Any = num_query_tokens
snake_case_ : List[str] = self.vision_config.hidden_size
snake_case_ : Union[str, Any] = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
snake_case_ : Dict = 1.0
snake_case_ : Optional[Any] = 0.02
@classmethod
def UpperCAmelCase__ ( cls , _lowercase , _lowercase , _lowercase , **_lowercase , ) -> Dict:
'''simple docstring'''
return cls(
vision_config=vision_config.to_dict() , qformer_config=qformer_config.to_dict() , text_config=text_config.to_dict() , **_lowercase , )
def UpperCAmelCase__ ( self ) -> List[str]:
'''simple docstring'''
snake_case_ : Optional[Any] = copy.deepcopy(self.__dict__ )
snake_case_ : Optional[int] = self.vision_config.to_dict()
snake_case_ : Union[str, Any] = self.qformer_config.to_dict()
snake_case_ : Dict = self.text_config.to_dict()
snake_case_ : Tuple = self.__class__.model_type
return output
| 58
|
"""simple docstring"""
from __future__ import absolute_import, division, print_function, unicode_literals
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers import RobertaConfig
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.roberta.modeling_roberta import (
ROBERTA_INPUTS_DOCSTRING,
ROBERTA_START_DOCSTRING,
RobertaEmbeddings,
)
from .modeling_highway_bert import BertPreTrainedModel, DeeBertModel, HighwayException, entropy
@add_start_docstrings(
'''The RoBERTa Model transformer with early exiting (DeeRoBERTa). ''' , SCREAMING_SNAKE_CASE__ , )
class _lowerCAmelCase ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_lowerCamelCase = RobertaConfig
_lowerCamelCase = '''roberta'''
def __init__( self , _lowercase ) -> Optional[Any]:
'''simple docstring'''
super().__init__(_lowercase )
snake_case_ : str = RobertaEmbeddings(_lowercase )
self.init_weights()
@add_start_docstrings(
'''RoBERTa Model (with early exiting - DeeRoBERTa) with a classifier on top,
also takes care of multi-layer training. ''' , SCREAMING_SNAKE_CASE__ , )
class _lowerCAmelCase ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_lowerCamelCase = RobertaConfig
_lowerCamelCase = '''roberta'''
def __init__( self , _lowercase ) -> List[Any]:
'''simple docstring'''
super().__init__(_lowercase )
snake_case_ : Optional[Any] = config.num_labels
snake_case_ : Dict = config.num_hidden_layers
snake_case_ : str = DeeRobertaModel(_lowercase )
snake_case_ : Dict = nn.Dropout(config.hidden_dropout_prob )
snake_case_ : List[str] = nn.Linear(config.hidden_size , self.config.num_labels )
@add_start_docstrings_to_model_forward(_lowercase )
def UpperCAmelCase__ ( self , _lowercase=None , _lowercase=None , _lowercase=None , _lowercase=None , _lowercase=None , _lowercase=None , _lowercase=None , _lowercase=-1 , _lowercase=False , ) -> Tuple:
'''simple docstring'''
snake_case_ : Any = self.num_layers
try:
snake_case_ : int = self.roberta(
_lowercase , attention_mask=_lowercase , token_type_ids=_lowercase , position_ids=_lowercase , head_mask=_lowercase , inputs_embeds=_lowercase , )
snake_case_ : str = outputs[1]
snake_case_ : Union[str, Any] = self.dropout(_lowercase )
snake_case_ : Tuple = self.classifier(_lowercase )
snake_case_ : Dict = (logits,) + outputs[2:] # add hidden states and attention if they are here
except HighwayException as e:
snake_case_ : List[Any] = e.message
snake_case_ : Union[str, Any] = e.exit_layer
snake_case_ : Dict = outputs[0]
if not self.training:
snake_case_ : Dict = entropy(_lowercase )
snake_case_ : Optional[int] = []
snake_case_ : Union[str, Any] = []
if labels is not None:
if self.num_labels == 1:
# We are doing regression
snake_case_ : Dict = MSELoss()
snake_case_ : Dict = loss_fct(logits.view(-1 ) , labels.view(-1 ) )
else:
snake_case_ : Union[str, Any] = CrossEntropyLoss()
snake_case_ : Union[str, Any] = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
# work with highway exits
snake_case_ : int = []
for highway_exit in outputs[-1]:
snake_case_ : Tuple = highway_exit[0]
if not self.training:
highway_logits_all.append(_lowercase )
highway_entropy.append(highway_exit[2] )
if self.num_labels == 1:
# We are doing regression
snake_case_ : Optional[int] = MSELoss()
snake_case_ : Optional[Any] = loss_fct(highway_logits.view(-1 ) , labels.view(-1 ) )
else:
snake_case_ : Optional[int] = CrossEntropyLoss()
snake_case_ : Union[str, Any] = loss_fct(highway_logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
highway_losses.append(_lowercase )
if train_highway:
snake_case_ : Dict = (sum(highway_losses[:-1] ),) + outputs
# exclude the final highway, of course
else:
snake_case_ : List[str] = (loss,) + outputs
if not self.training:
snake_case_ : Optional[Any] = outputs + ((original_entropy, highway_entropy), exit_layer)
if output_layer >= 0:
snake_case_ : Tuple = (
(outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:]
) # use the highway of the last layer
return outputs # (loss), logits, (hidden_states), (attentions), entropy
| 58
| 1
|
'''simple docstring'''
from datetime import datetime
import matplotlib.pyplot as plt
import torch
def __snake_case ( lowercase : Union[str, Any] ):
for param in module.parameters():
snake_case_ = False
def __snake_case ( ):
snake_case_ = "cuda" if torch.cuda.is_available() else "cpu"
if torch.backends.mps.is_available() and torch.backends.mps.is_built():
snake_case_ = "mps"
if device == "mps":
print(
"WARNING: MPS currently doesn\'t seem to work, and messes up backpropagation without any visible torch"
" errors. I recommend using CUDA on a colab notebook or CPU instead if you\'re facing inexplicable issues"
" with generations." )
return device
def __snake_case ( lowercase : Union[str, Any] ):
snake_case_ = plt.imshow(_UpperCamelCase )
fig.axes.get_xaxis().set_visible(_UpperCamelCase )
fig.axes.get_yaxis().set_visible(_UpperCamelCase )
plt.show()
def __snake_case ( ):
snake_case_ = datetime.now()
snake_case_ = current_time.strftime("%H:%M:%S" )
return timestamp
| 721
|
'''simple docstring'''
import json
import os
import re
import unicodedata
from json.encoder import INFINITY
from typing import Any, Dict, List, Optional, Tuple, Union
import numpy as np
import regex
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, is_flax_available, is_tf_available, is_torch_available, logging
from ...utils.generic import _is_jax, _is_numpy
lowercase__ = logging.get_logger(__name__)
lowercase__ = {
'''artists_file''': '''artists.json''',
'''lyrics_file''': '''lyrics.json''',
'''genres_file''': '''genres.json''',
}
lowercase__ = {
'''artists_file''': {
'''jukebox''': '''https://huggingface.co/ArthurZ/jukebox/blob/main/artists.json''',
},
'''genres_file''': {
'''jukebox''': '''https://huggingface.co/ArthurZ/jukebox/blob/main/genres.json''',
},
'''lyrics_file''': {
'''jukebox''': '''https://huggingface.co/ArthurZ/jukebox/blob/main/lyrics.json''',
},
}
lowercase__ = {
'''jukebox''': 5_12,
}
class UpperCAmelCase_ ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
snake_case = VOCAB_FILES_NAMES
snake_case = PRETRAINED_VOCAB_FILES_MAP
snake_case = PRETRAINED_LYRIC_TOKENS_SIZES
snake_case = ["""input_ids""", """attention_mask"""]
def __init__( self , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_=["v3", "v2", "v2"] , UpperCAmelCase_=5_12 , UpperCAmelCase_=5 , UpperCAmelCase_="<|endoftext|>" , **UpperCAmelCase_ , ):
snake_case_ = AddedToken(UpperCAmelCase_ , lstrip=UpperCAmelCase_ , rstrip=UpperCAmelCase_ ) if isinstance(UpperCAmelCase_ , UpperCAmelCase_ ) else unk_token
super().__init__(
unk_token=UpperCAmelCase_ , n_genres=UpperCAmelCase_ , version=UpperCAmelCase_ , max_n_lyric_tokens=UpperCAmelCase_ , **UpperCAmelCase_ , )
snake_case_ = version
snake_case_ = max_n_lyric_tokens
snake_case_ = n_genres
with open(UpperCAmelCase_ , encoding="utf-8" ) as vocab_handle:
snake_case_ = json.load(UpperCAmelCase_ )
with open(UpperCAmelCase_ , encoding="utf-8" ) as vocab_handle:
snake_case_ = json.load(UpperCAmelCase_ )
with open(UpperCAmelCase_ , encoding="utf-8" ) as vocab_handle:
snake_case_ = json.load(UpperCAmelCase_ )
snake_case_ = R"[^A-Za-z0-9.,:;!?\-'\"()\[\] \t\n]+"
# In v2, we had a n_vocab=80 and in v3 we missed + and so n_vocab=79 of characters.
if len(self.lyrics_encoder ) == 79:
snake_case_ = oov.replace(R"\-'" , R"\-+'" )
snake_case_ = regex.compile(UpperCAmelCase_ )
snake_case_ = {v: k for k, v in self.artists_encoder.items()}
snake_case_ = {v: k for k, v in self.genres_encoder.items()}
snake_case_ = {v: k for k, v in self.lyrics_encoder.items()}
@property
def _lowercase ( self ):
return len(self.artists_encoder ) + len(self.genres_encoder ) + len(self.lyrics_encoder )
def _lowercase ( self ):
return dict(self.artists_encoder , self.genres_encoder , self.lyrics_encoder )
def _lowercase ( self , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ):
snake_case_ = [self.artists_encoder.get(UpperCAmelCase_ , 0 ) for artist in list_artists]
for genres in range(len(UpperCAmelCase_ ) ):
snake_case_ = [self.genres_encoder.get(UpperCAmelCase_ , 0 ) for genre in list_genres[genres]]
snake_case_ = list_genres[genres] + [-1] * (self.n_genres - len(list_genres[genres] ))
snake_case_ = [[self.lyrics_encoder.get(UpperCAmelCase_ , 0 ) for character in list_lyrics[0]], [], []]
return artists_id, list_genres, lyric_ids
def _lowercase ( self , UpperCAmelCase_ ):
return list(UpperCAmelCase_ )
def _lowercase ( self , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , **UpperCAmelCase_ ):
snake_case_ , snake_case_ , snake_case_ = self.prepare_for_tokenization(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
snake_case_ = self._tokenize(UpperCAmelCase_ )
return artist, genre, lyrics
def _lowercase ( self , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = False ):
for idx in range(len(self.version ) ):
if self.version[idx] == "v3":
snake_case_ = artists[idx].lower()
snake_case_ = [genres[idx].lower()]
else:
snake_case_ = self._normalize(artists[idx] ) + ".v2"
snake_case_ = [
self._normalize(UpperCAmelCase_ ) + ".v2" for genre in genres[idx].split("_" )
] # split is for the full dictionary with combined genres
if self.version[0] == "v2":
snake_case_ = regex.compile(R"[^A-Za-z0-9.,:;!?\-'\"()\[\] \t\n]+" )
snake_case_ = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789.,:;!?-+'\"()[] \t\n"
snake_case_ = {vocab[index]: index + 1 for index in range(len(UpperCAmelCase_ ) )}
snake_case_ = 0
snake_case_ = len(UpperCAmelCase_ ) + 1
snake_case_ = self.vocab
snake_case_ = {v: k for k, v in self.vocab.items()}
snake_case_ = ""
else:
snake_case_ = regex.compile(R"[^A-Za-z0-9.,:;!?\-+'\"()\[\] \t\n]+" )
snake_case_ = self._run_strip_accents(UpperCAmelCase_ )
snake_case_ = lyrics.replace("\\" , "\n" )
snake_case_ = self.out_of_vocab.sub("" , UpperCAmelCase_ ), [], []
return artists, genres, lyrics
def _lowercase ( self , UpperCAmelCase_ ):
snake_case_ = unicodedata.normalize("NFD" , UpperCAmelCase_ )
snake_case_ = []
for char in text:
snake_case_ = unicodedata.category(UpperCAmelCase_ )
if cat == "Mn":
continue
output.append(UpperCAmelCase_ )
return "".join(UpperCAmelCase_ )
def _lowercase ( self , UpperCAmelCase_ ):
snake_case_ = (
[chr(UpperCAmelCase_ ) for i in range(ord("a" ) , ord("z" ) + 1 )]
+ [chr(UpperCAmelCase_ ) for i in range(ord("A" ) , ord("Z" ) + 1 )]
+ [chr(UpperCAmelCase_ ) for i in range(ord("0" ) , ord("9" ) + 1 )]
+ ["."]
)
snake_case_ = frozenset(UpperCAmelCase_ )
snake_case_ = re.compile(R"_+" )
snake_case_ = "".join([c if c in accepted else "_" for c in text.lower()] )
snake_case_ = pattern.sub("_" , UpperCAmelCase_ ).strip("_" )
return text
def _lowercase ( self , UpperCAmelCase_ ):
return " ".join(UpperCAmelCase_ )
def _lowercase ( self , UpperCAmelCase_ , UpperCAmelCase_ = None , UpperCAmelCase_ = False ):
# Convert to TensorType
if not isinstance(UpperCAmelCase_ , UpperCAmelCase_ ):
snake_case_ = TensorType(UpperCAmelCase_ )
# Get a function reference for the correct framework
if tensor_type == TensorType.TENSORFLOW:
if not is_tf_available():
raise ImportError(
"Unable to convert output to TensorFlow tensors format, TensorFlow is not installed." )
import tensorflow as tf
snake_case_ = tf.constant
snake_case_ = tf.is_tensor
elif tensor_type == TensorType.PYTORCH:
if not is_torch_available():
raise ImportError("Unable to convert output to PyTorch tensors format, PyTorch is not installed." )
import torch
snake_case_ = torch.tensor
snake_case_ = torch.is_tensor
elif tensor_type == TensorType.JAX:
if not is_flax_available():
raise ImportError("Unable to convert output to JAX tensors format, JAX is not installed." )
import jax.numpy as jnp # noqa: F811
snake_case_ = jnp.array
snake_case_ = _is_jax
else:
snake_case_ = np.asarray
snake_case_ = _is_numpy
# Do the tensor conversion in batch
try:
if prepend_batch_axis:
snake_case_ = [inputs]
if not is_tensor(UpperCAmelCase_ ):
snake_case_ = as_tensor(UpperCAmelCase_ )
except: # noqa E722
raise ValueError(
"Unable to create tensor, you should probably activate truncation and/or padding "
"with 'padding=True' 'truncation=True' to have batched tensors with the same length." )
return inputs
def __call__( self , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_="" , UpperCAmelCase_="pt" ):
snake_case_ = [0, 0, 0]
snake_case_ = [artist] * len(self.version )
snake_case_ = [genres] * len(self.version )
snake_case_ , snake_case_ , snake_case_ = self.tokenize(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
snake_case_ , snake_case_ , snake_case_ = self._convert_token_to_id(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
snake_case_ = [-INFINITY] * len(full_tokens[-1] )
snake_case_ = [
self.convert_to_tensors(
[input_ids + [artists_id[i]] + genres_ids[i] + full_tokens[i]] , tensor_type=UpperCAmelCase_ )
for i in range(len(self.version ) )
]
return BatchEncoding({"input_ids": input_ids, "attention_masks": attention_masks} )
def _lowercase ( self , UpperCAmelCase_ , UpperCAmelCase_ = None ):
if not os.path.isdir(UpperCAmelCase_ ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
snake_case_ = os.path.join(
UpperCAmelCase_ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["artists_file"] )
with open(UpperCAmelCase_ , "w" , encoding="utf-8" ) as f:
f.write(json.dumps(self.artists_encoder , ensure_ascii=UpperCAmelCase_ ) )
snake_case_ = os.path.join(
UpperCAmelCase_ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["genres_file"] )
with open(UpperCAmelCase_ , "w" , encoding="utf-8" ) as f:
f.write(json.dumps(self.genres_encoder , ensure_ascii=UpperCAmelCase_ ) )
snake_case_ = os.path.join(
UpperCAmelCase_ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["lyrics_file"] )
with open(UpperCAmelCase_ , "w" , encoding="utf-8" ) as f:
f.write(json.dumps(self.lyrics_encoder , ensure_ascii=UpperCAmelCase_ ) )
return (artists_file, genres_file, lyrics_file)
def _lowercase ( self , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ):
snake_case_ = self.artists_decoder.get(UpperCAmelCase_ )
snake_case_ = [self.genres_decoder.get(UpperCAmelCase_ ) for genre in genres_index]
snake_case_ = [self.lyrics_decoder.get(UpperCAmelCase_ ) for character in lyric_index]
return artist, genres, lyrics
| 420
| 0
|
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .embeddings import GaussianFourierProjection, TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
from .unet_ad_blocks import get_down_block, get_mid_block, get_out_block, get_up_block
@dataclass
class __UpperCamelCase ( _a ):
'''simple docstring'''
__magic_name__ = 42
class __UpperCamelCase ( _a ,_a ):
'''simple docstring'''
@register_to_config
def __init__( self , lowerCamelCase__ = 6_5_5_3_6 , lowerCamelCase__ = None , lowerCamelCase__ = 2 , lowerCamelCase__ = 2 , lowerCamelCase__ = 0 , lowerCamelCase__ = "fourier" , lowerCamelCase__ = True , lowerCamelCase__ = False , lowerCamelCase__ = 0.0 , lowerCamelCase__ = ("DownBlock1DNoSkip", "DownBlock1D", "AttnDownBlock1D") , lowerCamelCase__ = ("AttnUpBlock1D", "UpBlock1D", "UpBlock1DNoSkip") , lowerCamelCase__ = "UNetMidBlock1D" , lowerCamelCase__ = None , lowerCamelCase__ = (3_2, 3_2, 6_4) , lowerCamelCase__ = None , lowerCamelCase__ = 8 , lowerCamelCase__ = 1 , lowerCamelCase__ = False , ):
super().__init__()
UpperCAmelCase__: Dict = sample_size
# time
if time_embedding_type == "fourier":
UpperCAmelCase__: Union[str, Any] = GaussianFourierProjection(
embedding_size=8 , set_W_to_weight=lowerCamelCase__ , log=lowerCamelCase__ , flip_sin_to_cos=lowerCamelCase__ )
UpperCAmelCase__: Optional[Any] = 2 * block_out_channels[0]
elif time_embedding_type == "positional":
UpperCAmelCase__: Any = Timesteps(
block_out_channels[0] , flip_sin_to_cos=lowerCamelCase__ , downscale_freq_shift=lowerCamelCase__ )
UpperCAmelCase__: Optional[Any] = block_out_channels[0]
if use_timestep_embedding:
UpperCAmelCase__: Dict = block_out_channels[0] * 4
UpperCAmelCase__: Optional[int] = TimestepEmbedding(
in_channels=lowerCamelCase__ , time_embed_dim=lowerCamelCase__ , act_fn=lowerCamelCase__ , out_dim=block_out_channels[0] , )
UpperCAmelCase__: Any = nn.ModuleList([] )
UpperCAmelCase__: Optional[int] = None
UpperCAmelCase__: Union[str, Any] = nn.ModuleList([] )
UpperCAmelCase__: Any = None
# down
UpperCAmelCase__: Any = in_channels
for i, down_block_type in enumerate(lowerCamelCase__ ):
UpperCAmelCase__: List[Any] = output_channel
UpperCAmelCase__: Tuple = block_out_channels[i]
if i == 0:
input_channel += extra_in_channels
UpperCAmelCase__: Tuple = i == len(lowerCamelCase__ ) - 1
UpperCAmelCase__: Any = get_down_block(
lowerCamelCase__ , num_layers=lowerCamelCase__ , in_channels=lowerCamelCase__ , out_channels=lowerCamelCase__ , temb_channels=block_out_channels[0] , add_downsample=not is_final_block or downsample_each_block , )
self.down_blocks.append(lowerCamelCase__ )
# mid
UpperCAmelCase__: Optional[Any] = get_mid_block(
lowerCamelCase__ , in_channels=block_out_channels[-1] , mid_channels=block_out_channels[-1] , out_channels=block_out_channels[-1] , embed_dim=block_out_channels[0] , num_layers=lowerCamelCase__ , add_downsample=lowerCamelCase__ , )
# up
UpperCAmelCase__: List[str] = list(reversed(lowerCamelCase__ ) )
UpperCAmelCase__: Any = reversed_block_out_channels[0]
if out_block_type is None:
UpperCAmelCase__: List[Any] = out_channels
else:
UpperCAmelCase__: str = block_out_channels[0]
for i, up_block_type in enumerate(lowerCamelCase__ ):
UpperCAmelCase__: Tuple = output_channel
UpperCAmelCase__: Optional[Any] = (
reversed_block_out_channels[i + 1] if i < len(lowerCamelCase__ ) - 1 else final_upsample_channels
)
UpperCAmelCase__: Optional[int] = i == len(lowerCamelCase__ ) - 1
UpperCAmelCase__: Optional[Any] = get_up_block(
lowerCamelCase__ , num_layers=lowerCamelCase__ , in_channels=lowerCamelCase__ , out_channels=lowerCamelCase__ , temb_channels=block_out_channels[0] , add_upsample=not is_final_block , )
self.up_blocks.append(lowerCamelCase__ )
UpperCAmelCase__: Optional[int] = output_channel
# out
UpperCAmelCase__: Tuple = norm_num_groups if norm_num_groups is not None else min(block_out_channels[0] // 4 , 3_2 )
UpperCAmelCase__: Dict = get_out_block(
out_block_type=lowerCamelCase__ , num_groups_out=lowerCamelCase__ , embed_dim=block_out_channels[0] , out_channels=lowerCamelCase__ , act_fn=lowerCamelCase__ , fc_dim=block_out_channels[-1] // 4 , )
def _UpperCAmelCase ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = True , ):
UpperCAmelCase__: Optional[Any] = timestep
if not torch.is_tensor(lowerCamelCase__ ):
UpperCAmelCase__: Optional[Any] = torch.tensor([timesteps] , dtype=torch.long , device=sample.device )
elif torch.is_tensor(lowerCamelCase__ ) and len(timesteps.shape ) == 0:
UpperCAmelCase__: Optional[Any] = timesteps[None].to(sample.device )
UpperCAmelCase__: Any = self.time_proj(lowerCamelCase__ )
if self.config.use_timestep_embedding:
UpperCAmelCase__: Union[str, Any] = self.time_mlp(lowerCamelCase__ )
else:
UpperCAmelCase__: int = timestep_embed[..., None]
UpperCAmelCase__: List[Any] = timestep_embed.repeat([1, 1, sample.shape[2]] ).to(sample.dtype )
UpperCAmelCase__: Optional[int] = timestep_embed.broadcast_to((sample.shape[:1] + timestep_embed.shape[1:]) )
# 2. down
UpperCAmelCase__: List[Any] = ()
for downsample_block in self.down_blocks:
UpperCAmelCase__ , UpperCAmelCase__: int = downsample_block(hidden_states=lowerCamelCase__ , temb=lowerCamelCase__ )
down_block_res_samples += res_samples
# 3. mid
if self.mid_block:
UpperCAmelCase__: Dict = self.mid_block(lowerCamelCase__ , lowerCamelCase__ )
# 4. up
for i, upsample_block in enumerate(self.up_blocks ):
UpperCAmelCase__: Dict = down_block_res_samples[-1:]
UpperCAmelCase__: str = down_block_res_samples[:-1]
UpperCAmelCase__: Dict = upsample_block(lowerCamelCase__ , res_hidden_states_tuple=lowerCamelCase__ , temb=lowerCamelCase__ )
# 5. post-process
if self.out_block:
UpperCAmelCase__: List[str] = self.out_block(lowerCamelCase__ , lowerCamelCase__ )
if not return_dict:
return (sample,)
return UNetaDOutput(sample=lowerCamelCase__ )
| 113
|
import json
import os
import sys
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from huggingface_hub import HfFolder, Repository, create_repo, delete_repo
from requests.exceptions import HTTPError
import transformers
from transformers import (
CONFIG_MAPPING,
FEATURE_EXTRACTOR_MAPPING,
PROCESSOR_MAPPING,
TOKENIZER_MAPPING,
AutoConfig,
AutoFeatureExtractor,
AutoProcessor,
AutoTokenizer,
BertTokenizer,
ProcessorMixin,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
)
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
from transformers.tokenization_utils import TOKENIZER_CONFIG_FILE
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_tokenizers_available
sys.path.append(str(Path(__file__).parent.parent.parent.parent / """utils"""))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
from test_module.custom_processing import CustomProcessor # noqa E402
from test_module.custom_tokenization import CustomTokenizer # noqa E402
_lowerCAmelCase : List[str] =get_tests_dir("""fixtures/dummy_feature_extractor_config.json""")
_lowerCAmelCase : List[str] =get_tests_dir("""fixtures/vocab.json""")
_lowerCAmelCase : Dict =get_tests_dir("""fixtures""")
class __UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
__magic_name__ = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "bla", "blou"]
def _UpperCAmelCase ( self ):
UpperCAmelCase__: Union[str, Any] = 0
def _UpperCAmelCase ( self ):
UpperCAmelCase__: Optional[int] = AutoProcessor.from_pretrained("facebook/wav2vec2-base-960h" )
self.assertIsInstance(lowerCamelCase__ , lowerCamelCase__ )
def _UpperCAmelCase ( self ):
with tempfile.TemporaryDirectory() as tmpdirname:
UpperCAmelCase__: Any = WavaVecaConfig()
UpperCAmelCase__: Tuple = AutoProcessor.from_pretrained("facebook/wav2vec2-base-960h" )
# save in new folder
model_config.save_pretrained(lowerCamelCase__ )
processor.save_pretrained(lowerCamelCase__ )
UpperCAmelCase__: int = AutoProcessor.from_pretrained(lowerCamelCase__ )
self.assertIsInstance(lowerCamelCase__ , lowerCamelCase__ )
def _UpperCAmelCase ( self ):
with tempfile.TemporaryDirectory() as tmpdirname:
# copy relevant files
copyfile(lowerCamelCase__ , os.path.join(lowerCamelCase__ , lowerCamelCase__ ) )
copyfile(lowerCamelCase__ , os.path.join(lowerCamelCase__ , "vocab.json" ) )
UpperCAmelCase__: Tuple = AutoProcessor.from_pretrained(lowerCamelCase__ )
self.assertIsInstance(lowerCamelCase__ , lowerCamelCase__ )
def _UpperCAmelCase ( self ):
with tempfile.TemporaryDirectory() as tmpdirname:
UpperCAmelCase__: Optional[Any] = WavaVecaFeatureExtractor()
UpperCAmelCase__: List[Any] = AutoTokenizer.from_pretrained("facebook/wav2vec2-base-960h" )
UpperCAmelCase__: Dict = WavaVecaProcessor(lowerCamelCase__ , lowerCamelCase__ )
# save in new folder
processor.save_pretrained(lowerCamelCase__ )
# drop `processor_class` in tokenizer
with open(os.path.join(lowerCamelCase__ , lowerCamelCase__ ) , "r" ) as f:
UpperCAmelCase__: Optional[int] = json.load(lowerCamelCase__ )
config_dict.pop("processor_class" )
with open(os.path.join(lowerCamelCase__ , lowerCamelCase__ ) , "w" ) as f:
f.write(json.dumps(lowerCamelCase__ ) )
UpperCAmelCase__: Any = AutoProcessor.from_pretrained(lowerCamelCase__ )
self.assertIsInstance(lowerCamelCase__ , lowerCamelCase__ )
def _UpperCAmelCase ( self ):
with tempfile.TemporaryDirectory() as tmpdirname:
UpperCAmelCase__: str = WavaVecaFeatureExtractor()
UpperCAmelCase__: Tuple = AutoTokenizer.from_pretrained("facebook/wav2vec2-base-960h" )
UpperCAmelCase__: List[Any] = WavaVecaProcessor(lowerCamelCase__ , lowerCamelCase__ )
# save in new folder
processor.save_pretrained(lowerCamelCase__ )
# drop `processor_class` in feature extractor
with open(os.path.join(lowerCamelCase__ , lowerCamelCase__ ) , "r" ) as f:
UpperCAmelCase__: str = json.load(lowerCamelCase__ )
config_dict.pop("processor_class" )
with open(os.path.join(lowerCamelCase__ , lowerCamelCase__ ) , "w" ) as f:
f.write(json.dumps(lowerCamelCase__ ) )
UpperCAmelCase__: Tuple = AutoProcessor.from_pretrained(lowerCamelCase__ )
self.assertIsInstance(lowerCamelCase__ , lowerCamelCase__ )
def _UpperCAmelCase ( self ):
with tempfile.TemporaryDirectory() as tmpdirname:
UpperCAmelCase__: Union[str, Any] = WavaVecaConfig(processor_class="Wav2Vec2Processor" )
model_config.save_pretrained(lowerCamelCase__ )
# copy relevant files
copyfile(lowerCamelCase__ , os.path.join(lowerCamelCase__ , "vocab.json" ) )
# create emtpy sample processor
with open(os.path.join(lowerCamelCase__ , lowerCamelCase__ ) , "w" ) as f:
f.write("{}" )
UpperCAmelCase__: Union[str, Any] = AutoProcessor.from_pretrained(lowerCamelCase__ )
self.assertIsInstance(lowerCamelCase__ , lowerCamelCase__ )
def _UpperCAmelCase ( self ):
# If remote code is not set, we will time out when asking whether to load the model.
with self.assertRaises(lowerCamelCase__ ):
UpperCAmelCase__: Optional[Any] = AutoProcessor.from_pretrained("hf-internal-testing/test_dynamic_processor" )
# If remote code is disabled, we can't load this config.
with self.assertRaises(lowerCamelCase__ ):
UpperCAmelCase__: Optional[int] = AutoProcessor.from_pretrained(
"hf-internal-testing/test_dynamic_processor" , trust_remote_code=lowerCamelCase__ )
UpperCAmelCase__: Union[str, Any] = AutoProcessor.from_pretrained("hf-internal-testing/test_dynamic_processor" , trust_remote_code=lowerCamelCase__ )
self.assertTrue(processor.special_attribute_present )
self.assertEqual(processor.__class__.__name__ , "NewProcessor" )
UpperCAmelCase__: Dict = processor.feature_extractor
self.assertTrue(feature_extractor.special_attribute_present )
self.assertEqual(feature_extractor.__class__.__name__ , "NewFeatureExtractor" )
UpperCAmelCase__: Any = processor.tokenizer
self.assertTrue(tokenizer.special_attribute_present )
if is_tokenizers_available():
self.assertEqual(tokenizer.__class__.__name__ , "NewTokenizerFast" )
# Test we can also load the slow version
UpperCAmelCase__: Dict = AutoProcessor.from_pretrained(
"hf-internal-testing/test_dynamic_processor" , trust_remote_code=lowerCamelCase__ , use_fast=lowerCamelCase__ )
UpperCAmelCase__: str = new_processor.tokenizer
self.assertTrue(new_tokenizer.special_attribute_present )
self.assertEqual(new_tokenizer.__class__.__name__ , "NewTokenizer" )
else:
self.assertEqual(tokenizer.__class__.__name__ , "NewTokenizer" )
def _UpperCAmelCase ( self ):
try:
AutoConfig.register("custom" , lowerCamelCase__ )
AutoFeatureExtractor.register(lowerCamelCase__ , lowerCamelCase__ )
AutoTokenizer.register(lowerCamelCase__ , slow_tokenizer_class=lowerCamelCase__ )
AutoProcessor.register(lowerCamelCase__ , lowerCamelCase__ )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(lowerCamelCase__ ):
AutoProcessor.register(lowerCamelCase__ , lowerCamelCase__ )
# Now that the config is registered, it can be used as any other config with the auto-API
UpperCAmelCase__: Dict = CustomFeatureExtractor.from_pretrained(lowerCamelCase__ )
with tempfile.TemporaryDirectory() as tmp_dir:
UpperCAmelCase__: Tuple = os.path.join(lowerCamelCase__ , "vocab.txt" )
with open(lowerCamelCase__ , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens] ) )
UpperCAmelCase__: str = CustomTokenizer(lowerCamelCase__ )
UpperCAmelCase__: Optional[int] = CustomProcessor(lowerCamelCase__ , lowerCamelCase__ )
with tempfile.TemporaryDirectory() as tmp_dir:
processor.save_pretrained(lowerCamelCase__ )
UpperCAmelCase__: Tuple = AutoProcessor.from_pretrained(lowerCamelCase__ )
self.assertIsInstance(lowerCamelCase__ , lowerCamelCase__ )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
if CustomConfig in PROCESSOR_MAPPING._extra_content:
del PROCESSOR_MAPPING._extra_content[CustomConfig]
def _UpperCAmelCase ( self ):
class __UpperCamelCase ( _a ):
'''simple docstring'''
__magic_name__ = False
class __UpperCamelCase ( _a ):
'''simple docstring'''
__magic_name__ = False
class __UpperCamelCase ( _a ):
'''simple docstring'''
__magic_name__ = "AutoFeatureExtractor"
__magic_name__ = "AutoTokenizer"
__magic_name__ = False
try:
AutoConfig.register("custom" , lowerCamelCase__ )
AutoFeatureExtractor.register(lowerCamelCase__ , lowerCamelCase__ )
AutoTokenizer.register(lowerCamelCase__ , slow_tokenizer_class=lowerCamelCase__ )
AutoProcessor.register(lowerCamelCase__ , lowerCamelCase__ )
# If remote code is not set, the default is to use local classes.
UpperCAmelCase__: Any = AutoProcessor.from_pretrained("hf-internal-testing/test_dynamic_processor" )
self.assertEqual(processor.__class__.__name__ , "NewProcessor" )
self.assertFalse(processor.special_attribute_present )
self.assertFalse(processor.feature_extractor.special_attribute_present )
self.assertFalse(processor.tokenizer.special_attribute_present )
# If remote code is disabled, we load the local ones.
UpperCAmelCase__: Union[str, Any] = AutoProcessor.from_pretrained(
"hf-internal-testing/test_dynamic_processor" , trust_remote_code=lowerCamelCase__ )
self.assertEqual(processor.__class__.__name__ , "NewProcessor" )
self.assertFalse(processor.special_attribute_present )
self.assertFalse(processor.feature_extractor.special_attribute_present )
self.assertFalse(processor.tokenizer.special_attribute_present )
# If remote is enabled, we load from the Hub.
UpperCAmelCase__: Tuple = AutoProcessor.from_pretrained(
"hf-internal-testing/test_dynamic_processor" , trust_remote_code=lowerCamelCase__ )
self.assertEqual(processor.__class__.__name__ , "NewProcessor" )
self.assertTrue(processor.special_attribute_present )
self.assertTrue(processor.feature_extractor.special_attribute_present )
self.assertTrue(processor.tokenizer.special_attribute_present )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
if CustomConfig in PROCESSOR_MAPPING._extra_content:
del PROCESSOR_MAPPING._extra_content[CustomConfig]
def _UpperCAmelCase ( self ):
UpperCAmelCase__: Union[str, Any] = AutoProcessor.from_pretrained("hf-internal-testing/tiny-random-bert" )
self.assertEqual(processor.__class__.__name__ , "BertTokenizerFast" )
def _UpperCAmelCase ( self ):
UpperCAmelCase__: Tuple = AutoProcessor.from_pretrained("hf-internal-testing/tiny-random-convnext" )
self.assertEqual(processor.__class__.__name__ , "ConvNextImageProcessor" )
@is_staging_test
class __UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
__magic_name__ = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "bla", "blou"]
@classmethod
def _UpperCAmelCase ( cls ):
UpperCAmelCase__: Dict = TOKEN
HfFolder.save_token(lowerCamelCase__ )
@classmethod
def _UpperCAmelCase ( cls ):
try:
delete_repo(token=cls._token , repo_id="test-processor" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="valid_org/test-processor-org" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="test-dynamic-processor" )
except HTTPError:
pass
def _UpperCAmelCase ( self ):
UpperCAmelCase__: Optional[Any] = WavaVecaProcessor.from_pretrained(lowerCamelCase__ )
with tempfile.TemporaryDirectory() as tmp_dir:
processor.save_pretrained(
os.path.join(lowerCamelCase__ , "test-processor" ) , push_to_hub=lowerCamelCase__ , use_auth_token=self._token )
UpperCAmelCase__: Union[str, Any] = WavaVecaProcessor.from_pretrained(F"{USER}/test-processor" )
for k, v in processor.feature_extractor.__dict__.items():
self.assertEqual(lowerCamelCase__ , getattr(new_processor.feature_extractor , lowerCamelCase__ ) )
self.assertDictEqual(new_processor.tokenizer.get_vocab() , processor.tokenizer.get_vocab() )
def _UpperCAmelCase ( self ):
UpperCAmelCase__: Dict = WavaVecaProcessor.from_pretrained(lowerCamelCase__ )
with tempfile.TemporaryDirectory() as tmp_dir:
processor.save_pretrained(
os.path.join(lowerCamelCase__ , "test-processor-org" ) , push_to_hub=lowerCamelCase__ , use_auth_token=self._token , organization="valid_org" , )
UpperCAmelCase__: Optional[int] = WavaVecaProcessor.from_pretrained("valid_org/test-processor-org" )
for k, v in processor.feature_extractor.__dict__.items():
self.assertEqual(lowerCamelCase__ , getattr(new_processor.feature_extractor , lowerCamelCase__ ) )
self.assertDictEqual(new_processor.tokenizer.get_vocab() , processor.tokenizer.get_vocab() )
def _UpperCAmelCase ( self ):
CustomFeatureExtractor.register_for_auto_class()
CustomTokenizer.register_for_auto_class()
CustomProcessor.register_for_auto_class()
UpperCAmelCase__: List[str] = CustomFeatureExtractor.from_pretrained(lowerCamelCase__ )
with tempfile.TemporaryDirectory() as tmp_dir:
UpperCAmelCase__: Optional[int] = os.path.join(lowerCamelCase__ , "vocab.txt" )
with open(lowerCamelCase__ , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens] ) )
UpperCAmelCase__: Optional[Any] = CustomTokenizer(lowerCamelCase__ )
UpperCAmelCase__: str = CustomProcessor(lowerCamelCase__ , lowerCamelCase__ )
with tempfile.TemporaryDirectory() as tmp_dir:
create_repo(F"{USER}/test-dynamic-processor" , token=self._token )
UpperCAmelCase__: int = Repository(lowerCamelCase__ , clone_from=F"{USER}/test-dynamic-processor" , token=self._token )
processor.save_pretrained(lowerCamelCase__ )
# This has added the proper auto_map field to the feature extractor config
self.assertDictEqual(
processor.feature_extractor.auto_map , {
"AutoFeatureExtractor": "custom_feature_extraction.CustomFeatureExtractor",
"AutoProcessor": "custom_processing.CustomProcessor",
} , )
# This has added the proper auto_map field to the tokenizer config
with open(os.path.join(lowerCamelCase__ , "tokenizer_config.json" ) ) as f:
UpperCAmelCase__: Tuple = json.load(lowerCamelCase__ )
self.assertDictEqual(
tokenizer_config["auto_map"] , {
"AutoTokenizer": ["custom_tokenization.CustomTokenizer", None],
"AutoProcessor": "custom_processing.CustomProcessor",
} , )
# The code has been copied from fixtures
self.assertTrue(os.path.isfile(os.path.join(lowerCamelCase__ , "custom_feature_extraction.py" ) ) )
self.assertTrue(os.path.isfile(os.path.join(lowerCamelCase__ , "custom_tokenization.py" ) ) )
self.assertTrue(os.path.isfile(os.path.join(lowerCamelCase__ , "custom_processing.py" ) ) )
repo.push_to_hub()
UpperCAmelCase__: Union[str, Any] = AutoProcessor.from_pretrained(F"{USER}/test-dynamic-processor" , trust_remote_code=lowerCamelCase__ )
# Can't make an isinstance check because the new_processor is from the CustomProcessor class of a dynamic module
self.assertEqual(new_processor.__class__.__name__ , "CustomProcessor" )
| 113
| 1
|
'''simple docstring'''
import bza
import gzip
import lzma
import os
import shutil
import struct
import tarfile
import warnings
import zipfile
from abc import ABC, abstractmethod
from pathlib import Path
from typing import Dict, List, Optional, Type, Union
from .. import config
from .filelock import FileLock
from .logging import get_logger
a = get_logger(__name__)
class a_ :
def __init__( self : Union[str, Any] , a_ : Optional[str] = None ) -> Any:
snake_case: Any =(
os.path.join(a_ , config.EXTRACTED_DATASETS_DIR ) if cache_dir else config.EXTRACTED_DATASETS_PATH
)
snake_case: Dict =Extractor
def UpperCamelCase ( self : int , a_ : str ) -> str:
from .file_utils import hash_url_to_filename
# Path where we extract compressed archives
# We extract in the cache dir, and get the extracted path name by hashing the original path"
snake_case: Union[str, Any] =os.path.abspath(a_ )
return os.path.join(self.extract_dir , hash_url_to_filename(a_ ) )
def UpperCamelCase ( self : List[Any] , a_ : str , a_ : bool ) -> bool:
return force_extract or (
not os.path.isfile(a_ ) and not (os.path.isdir(a_ ) and os.listdir(a_ ))
)
def UpperCamelCase ( self : List[str] , a_ : str , a_ : bool = False ) -> str:
snake_case: Optional[int] =self.extractor.infer_extractor_format(a_ )
if not extractor_format:
return input_path
snake_case: int =self._get_output_path(a_ )
if self._do_extract(a_ , a_ ):
self.extractor.extract(a_ , a_ , a_ )
return output_path
class a_ ( UpperCAmelCase__ ):
@classmethod
@abstractmethod
def UpperCamelCase ( cls : Any , a_ : Union[Path, str] , **a_ : List[Any] ) -> bool:
...
@staticmethod
@abstractmethod
def UpperCamelCase ( a_ : Union[Path, str] , a_ : Union[Path, str] ) -> None:
...
class a_ ( UpperCAmelCase__ , UpperCAmelCase__ ):
UpperCAmelCase : List[bytes] = []
@staticmethod
def UpperCamelCase ( a_ : Union[Path, str] , a_ : int ) -> Dict:
with open(a_ , 'rb' ) as f:
return f.read(a_ )
@classmethod
def UpperCamelCase ( cls : Tuple , a_ : Union[Path, str] , a_ : bytes = b"" ) -> bool:
if not magic_number:
snake_case: str =max(len(a_ ) for cls_magic_number in cls.magic_numbers )
try:
snake_case: Any =cls.read_magic_number(a_ , a_ )
except OSError:
return False
return any(magic_number.startswith(a_ ) for cls_magic_number in cls.magic_numbers )
class a_ ( UpperCAmelCase__ ):
@classmethod
def UpperCamelCase ( cls : str , a_ : Union[Path, str] , **a_ : Optional[Any] ) -> bool:
return tarfile.is_tarfile(a_ )
@staticmethod
def UpperCamelCase ( a_ : Optional[Any] , a_ : int ) -> Union[str, Any]:
def resolved(a_ : str ) -> str:
return os.path.realpath(os.path.abspath(a_ ) )
def badpath(a_ : str , a_ : str ) -> bool:
# joinpath will ignore base if path is absolute
return not resolved(os.path.join(a_ , a_ ) ).startswith(a_ )
def badlink(a_ : List[Any] , a_ : str ) -> bool:
# Links are interpreted relative to the directory containing the link
snake_case: str =resolved(os.path.join(a_ , os.path.dirname(info.name ) ) )
return badpath(info.linkname , base=a_ )
snake_case: Dict =resolved(a_ )
for finfo in members:
if badpath(finfo.name , a_ ):
logger.error(F'''Extraction of {finfo.name} is blocked (illegal path)''' )
elif finfo.issym() and badlink(a_ , a_ ):
logger.error(F'''Extraction of {finfo.name} is blocked: Symlink to {finfo.linkname}''' )
elif finfo.islnk() and badlink(a_ , a_ ):
logger.error(F'''Extraction of {finfo.name} is blocked: Hard link to {finfo.linkname}''' )
else:
yield finfo
@staticmethod
def UpperCamelCase ( a_ : Union[Path, str] , a_ : Union[Path, str] ) -> None:
os.makedirs(a_ , exist_ok=a_ )
snake_case: Optional[int] =tarfile.open(a_ )
tar_file.extractall(a_ , members=TarExtractor.safemembers(a_ , a_ ) )
tar_file.close()
class a_ ( UpperCAmelCase__ ):
UpperCAmelCase : str = [B'\x1F\x8B']
@staticmethod
def UpperCamelCase ( a_ : Union[Path, str] , a_ : Union[Path, str] ) -> None:
with gzip.open(a_ , 'rb' ) as gzip_file:
with open(a_ , 'wb' ) as extracted_file:
shutil.copyfileobj(a_ , a_ )
class a_ ( UpperCAmelCase__ ):
UpperCAmelCase : Dict = [
B'PK\x03\x04',
B'PK\x05\x06', # empty archive
B'PK\x07\x08', # spanned archive
]
@classmethod
def UpperCamelCase ( cls : List[Any] , a_ : Union[Path, str] , a_ : bytes = b"" ) -> bool:
if super().is_extractable(a_ , magic_number=a_ ):
return True
try:
# Alternative version of zipfile.is_zipfile that has less false positives, but misses executable zip archives.
# From: https://github.com/python/cpython/pull/5053
from zipfile import (
_CD_SIGNATURE,
_ECD_DISK_NUMBER,
_ECD_DISK_START,
_ECD_ENTRIES_TOTAL,
_ECD_OFFSET,
_ECD_SIZE,
_EndRecData,
sizeCentralDir,
stringCentralDir,
structCentralDir,
)
with open(a_ , 'rb' ) as fp:
snake_case: int =_EndRecData(a_ )
if endrec:
if endrec[_ECD_ENTRIES_TOTAL] == 0 and endrec[_ECD_SIZE] == 0 and endrec[_ECD_OFFSET] == 0:
return True # Empty zipfiles are still zipfiles
elif endrec[_ECD_DISK_NUMBER] == endrec[_ECD_DISK_START]:
fp.seek(endrec[_ECD_OFFSET] ) # Central directory is on the same disk
if fp.tell() == endrec[_ECD_OFFSET] and endrec[_ECD_SIZE] >= sizeCentralDir:
snake_case: List[Any] =fp.read(a_ ) # CD is where we expect it to be
if len(a_ ) == sizeCentralDir:
snake_case: Tuple =struct.unpack(a_ , a_ ) # CD is the right size
if centdir[_CD_SIGNATURE] == stringCentralDir:
return True # First central directory entry has correct magic number
return False
except Exception: # catch all errors in case future python versions change the zipfile internals
return False
@staticmethod
def UpperCamelCase ( a_ : Union[Path, str] , a_ : Union[Path, str] ) -> None:
os.makedirs(a_ , exist_ok=a_ )
with zipfile.ZipFile(a_ , 'r' ) as zip_file:
zip_file.extractall(a_ )
zip_file.close()
class a_ ( UpperCAmelCase__ ):
UpperCAmelCase : List[Any] = [B'\xFD\x37\x7A\x58\x5A\x00']
@staticmethod
def UpperCamelCase ( a_ : Union[Path, str] , a_ : Union[Path, str] ) -> None:
with lzma.open(a_ ) as compressed_file:
with open(a_ , 'wb' ) as extracted_file:
shutil.copyfileobj(a_ , a_ )
class a_ ( UpperCAmelCase__ ):
UpperCAmelCase : List[str] = [B'Rar!\x1a\x07\x00', B'Rar!\x1a\x07\x01\x00'] # RAR_ID # RAR5_ID
@staticmethod
def UpperCamelCase ( a_ : Union[Path, str] , a_ : Union[Path, str] ) -> None:
if not config.RARFILE_AVAILABLE:
raise ImportError('Please pip install rarfile' )
import rarfile
os.makedirs(a_ , exist_ok=a_ )
snake_case: Dict =rarfile.RarFile(a_ )
rf.extractall(a_ )
rf.close()
class a_ ( UpperCAmelCase__ ):
UpperCAmelCase : List[str] = [B'\x28\xb5\x2F\xFD']
@staticmethod
def UpperCamelCase ( a_ : Union[Path, str] , a_ : Union[Path, str] ) -> None:
if not config.ZSTANDARD_AVAILABLE:
raise ImportError('Please pip install zstandard' )
import zstandard as zstd
snake_case: int =zstd.ZstdDecompressor()
with open(a_ , 'rb' ) as ifh, open(a_ , 'wb' ) as ofh:
dctx.copy_stream(a_ , a_ )
class a_ ( UpperCAmelCase__ ):
UpperCAmelCase : int = [B'\x42\x5A\x68']
@staticmethod
def UpperCamelCase ( a_ : Union[Path, str] , a_ : Union[Path, str] ) -> None:
with bza.open(a_ , 'rb' ) as compressed_file:
with open(a_ , 'wb' ) as extracted_file:
shutil.copyfileobj(a_ , a_ )
class a_ ( UpperCAmelCase__ ):
UpperCAmelCase : int = [B'\x37\x7A\xBC\xAF\x27\x1C']
@staticmethod
def UpperCamelCase ( a_ : Union[Path, str] , a_ : Union[Path, str] ) -> None:
if not config.PY7ZR_AVAILABLE:
raise ImportError('Please pip install py7zr' )
import pyazr
os.makedirs(a_ , exist_ok=a_ )
with pyazr.SevenZipFile(a_ , 'r' ) as archive:
archive.extractall(a_ )
class a_ ( UpperCAmelCase__ ):
UpperCAmelCase : Optional[int] = [B'\x04\x22\x4D\x18']
@staticmethod
def UpperCamelCase ( a_ : Union[Path, str] , a_ : Union[Path, str] ) -> None:
if not config.LZ4_AVAILABLE:
raise ImportError('Please pip install lz4' )
import lza.frame
with lza.frame.open(a_ , 'rb' ) as compressed_file:
with open(a_ , 'wb' ) as extracted_file:
shutil.copyfileobj(a_ , a_ )
class a_ :
# Put zip file to the last, b/c it is possible wrongly detected as zip (I guess it means: as tar or gzip)
UpperCAmelCase : Dict[str, Type[BaseExtractor]] = {
"tar": TarExtractor,
"gzip": GzipExtractor,
"zip": ZipExtractor,
"xz": XzExtractor,
"rar": RarExtractor,
"zstd": ZstdExtractor,
"bz2": BzipaExtractor,
"7z": SevenZipExtractor, # <Added version="2.4.0"/>
"lz4": LzaExtractor, # <Added version="2.4.0"/>
}
@classmethod
def UpperCamelCase ( cls : List[Any] ) -> Optional[int]:
return max(
len(a_ )
for extractor in cls.extractors.values()
if issubclass(a_ , a_ )
for extractor_magic_number in extractor.magic_numbers )
@staticmethod
def UpperCamelCase ( a_ : Union[Path, str] , a_ : int ) -> Tuple:
try:
return MagicNumberBaseExtractor.read_magic_number(a_ , magic_number_length=a_ )
except OSError:
return b""
@classmethod
def UpperCamelCase ( cls : str , a_ : Union[Path, str] , a_ : bool = False ) -> bool:
warnings.warn(
'Method \'is_extractable\' was deprecated in version 2.4.0 and will be removed in 3.0.0. '
'Use \'infer_extractor_format\' instead.' , category=a_ , )
snake_case: Union[str, Any] =cls.infer_extractor_format(a_ )
if extractor_format:
return True if not return_extractor else (True, cls.extractors[extractor_format])
return False if not return_extractor else (False, None)
@classmethod
def UpperCamelCase ( cls : Any , a_ : Union[Path, str] ) -> str: # <Added version="2.4.0"/>
snake_case: Dict =cls._get_magic_number_max_length()
snake_case: Optional[Any] =cls._read_magic_number(a_ , a_ )
for extractor_format, extractor in cls.extractors.items():
if extractor.is_extractable(a_ , magic_number=a_ ):
return extractor_format
@classmethod
def UpperCamelCase ( cls : List[Any] , a_ : Union[Path, str] , a_ : Union[Path, str] , a_ : Optional[str] = None , a_ : Optional[BaseExtractor] = "deprecated" , ) -> None:
os.makedirs(os.path.dirname(a_ ) , exist_ok=a_ )
# Prevent parallel extractions
snake_case: Dict =str(Path(a_ ).with_suffix('.lock' ) )
with FileLock(a_ ):
shutil.rmtree(a_ , ignore_errors=a_ )
if extractor_format or extractor != "deprecated":
if extractor != "deprecated" or not isinstance(a_ , a_ ): # passed as positional arg
warnings.warn(
'Parameter \'extractor\' was deprecated in version 2.4.0 and will be removed in 3.0.0. '
'Use \'extractor_format\' instead.' , category=a_ , )
snake_case: Any =extractor if extractor != """deprecated""" else extractor_format
else:
snake_case: Any =cls.extractors[extractor_format]
return extractor.extract(a_ , a_ )
else:
warnings.warn(
'Parameter \'extractor_format\' was made required in version 2.4.0 and not passing it will raise an '
'exception in 3.0.0.' , category=a_ , )
for extractor in cls.extractors.values():
if extractor.is_extractable(a_ ):
return extractor.extract(a_ , a_ )
| 714
|
'''simple docstring'''
import argparse
import glob
import logging
import os
import time
from argparse import Namespace
import numpy as np
import torch
from lightning_base import BaseTransformer, add_generic_args, generic_train
from torch.utils.data import DataLoader, TensorDataset
from transformers import glue_compute_metrics as compute_metrics
from transformers import glue_convert_examples_to_features as convert_examples_to_features
from transformers import glue_output_modes, glue_tasks_num_labels
from transformers import glue_processors as processors
a = logging.getLogger(__name__)
class a_ ( snake_case ):
UpperCAmelCase : Any = """sequence-classification"""
def __init__( self : int , a_ : str ) -> str:
if type(a_ ) == dict:
snake_case: List[Any] =Namespace(**a_ )
snake_case: Tuple =glue_output_modes[hparams.task]
snake_case: Any =glue_tasks_num_labels[hparams.task]
super().__init__(a_ , a_ , self.mode )
def UpperCamelCase ( self : Tuple , **a_ : Tuple ) -> Union[str, Any]:
return self.model(**a_ )
def UpperCamelCase ( self : int , a_ : Union[str, Any] , a_ : Optional[int] ) -> Optional[int]:
snake_case: Any ={'input_ids': batch[0], 'attention_mask': batch[1], 'labels': batch[3]}
if self.config.model_type not in ["distilbert", "bart"]:
snake_case: Optional[int] =batch[2] if self.config.model_type in ['bert', 'xlnet', 'albert'] else None
snake_case: Optional[int] =self(**a_ )
snake_case: Any =outputs[0]
snake_case: Union[str, Any] =self.trainer.lr_schedulers[0]['scheduler']
snake_case: str ={'loss': loss, 'rate': lr_scheduler.get_last_lr()[-1]}
return {"loss": loss, "log": tensorboard_logs}
def UpperCamelCase ( self : str ) -> Tuple:
snake_case: int =self.hparams
snake_case: Union[str, Any] =processors[args.task]()
snake_case: Union[str, Any] =processor.get_labels()
for mode in ["train", "dev"]:
snake_case: Optional[Any] =self._feature_file(a_ )
if os.path.exists(a_ ) and not args.overwrite_cache:
logger.info('Loading features from cached file %s' , a_ )
else:
logger.info('Creating features from dataset file at %s' , args.data_dir )
snake_case: int =(
processor.get_dev_examples(args.data_dir )
if mode == 'dev'
else processor.get_train_examples(args.data_dir )
)
snake_case: Tuple =convert_examples_to_features(
a_ , self.tokenizer , max_length=args.max_seq_length , label_list=self.labels , output_mode=args.glue_output_mode , )
logger.info('Saving features into cached file %s' , a_ )
torch.save(a_ , a_ )
def UpperCamelCase ( self : List[Any] , a_ : str , a_ : int , a_ : bool = False ) -> DataLoader:
snake_case: List[Any] ='dev' if mode == 'test' else mode
snake_case: Union[str, Any] =self._feature_file(a_ )
logger.info('Loading features from cached file %s' , a_ )
snake_case: Dict =torch.load(a_ )
snake_case: Union[str, Any] =torch.tensor([f.input_ids for f in features] , dtype=torch.long )
snake_case: List[Any] =torch.tensor([f.attention_mask for f in features] , dtype=torch.long )
snake_case: str =torch.tensor([f.token_type_ids for f in features] , dtype=torch.long )
if self.hparams.glue_output_mode == "classification":
snake_case: Optional[Any] =torch.tensor([f.label for f in features] , dtype=torch.long )
elif self.hparams.glue_output_mode == "regression":
snake_case: Union[str, Any] =torch.tensor([f.label for f in features] , dtype=torch.float )
return DataLoader(
TensorDataset(a_ , a_ , a_ , a_ ) , batch_size=a_ , shuffle=a_ , )
def UpperCamelCase ( self : List[str] , a_ : Optional[int] , a_ : Any ) -> Dict:
snake_case: int ={'input_ids': batch[0], 'attention_mask': batch[1], 'labels': batch[3]}
if self.config.model_type not in ["distilbert", "bart"]:
snake_case: Tuple =batch[2] if self.config.model_type in ['bert', 'xlnet', 'albert'] else None
snake_case: List[str] =self(**a_ )
snake_case , snake_case: str =outputs[:2]
snake_case: Any =logits.detach().cpu().numpy()
snake_case: Union[str, Any] =inputs['labels'].detach().cpu().numpy()
return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids}
def UpperCamelCase ( self : int , a_ : Union[str, Any] ) -> tuple:
snake_case: Optional[Any] =torch.stack([x['val_loss'] for x in outputs] ).mean().detach().cpu().item()
snake_case: str =np.concatenate([x['pred'] for x in outputs] , axis=0 )
if self.hparams.glue_output_mode == "classification":
snake_case: Union[str, Any] =np.argmax(a_ , axis=1 )
elif self.hparams.glue_output_mode == "regression":
snake_case: Optional[Any] =np.squeeze(a_ )
snake_case: Tuple =np.concatenate([x['target'] for x in outputs] , axis=0 )
snake_case: Any =[[] for _ in range(out_label_ids.shape[0] )]
snake_case: str =[[] for _ in range(out_label_ids.shape[0] )]
snake_case: int ={**{'val_loss': val_loss_mean}, **compute_metrics(self.hparams.task , a_ , a_ )}
snake_case: Union[str, Any] =dict(results.items() )
snake_case: Dict =results
return ret, preds_list, out_label_list
def UpperCamelCase ( self : str , a_ : list ) -> dict:
snake_case , snake_case , snake_case: Union[str, Any] =self._eval_end(a_ )
snake_case: Optional[Any] =ret['log']
return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
def UpperCamelCase ( self : Tuple , a_ : Tuple ) -> dict:
snake_case , snake_case , snake_case: int =self._eval_end(a_ )
snake_case: List[Any] =ret['log']
# `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss`
return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
@staticmethod
def UpperCamelCase ( a_ : Optional[int] , a_ : Dict ) -> Tuple:
BaseTransformer.add_model_specific_args(a_ , a_ )
parser.add_argument(
'--max_seq_length' , default=1_2_8 , type=a_ , help=(
'The maximum total input sequence length after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
) , )
parser.add_argument(
'--task' , default='' , type=a_ , required=a_ , help='The GLUE task to run' , )
parser.add_argument(
'--gpus' , default=0 , type=a_ , help='The number of GPUs allocated for this, it is by default 0 meaning none' , )
parser.add_argument(
'--overwrite_cache' , action='store_true' , help='Overwrite the cached training and evaluation sets' )
return parser
def a_ ( ) -> Any:
"""simple docstring"""
snake_case: Tuple =argparse.ArgumentParser()
add_generic_args(__UpperCAmelCase , os.getcwd() )
snake_case: List[Any] =GLUETransformer.add_model_specific_args(__UpperCAmelCase , os.getcwd() )
snake_case: Optional[int] =parser.parse_args()
# If output_dir not provided, a folder will be generated in pwd
if args.output_dir is None:
snake_case: Optional[int] =os.path.join(
'./results' , f'''{args.task}_{time.strftime("%Y%m%d_%H%M%S" )}''' , )
os.makedirs(args.output_dir )
snake_case: str =GLUETransformer(__UpperCAmelCase )
snake_case: Tuple =generic_train(__UpperCAmelCase , __UpperCAmelCase )
# Optionally, predict on dev set and write to output_dir
if args.do_predict:
snake_case: str =sorted(glob.glob(os.path.join(args.output_dir , 'checkpoint-epoch=*.ckpt' ) , recursive=__UpperCAmelCase ) )
snake_case: int =model.load_from_checkpoint(checkpoints[-1] )
return trainer.test(__UpperCAmelCase )
if __name__ == "__main__":
main()
| 347
| 0
|
import contextlib
import csv
import json
import os
import sqlitea
import tarfile
import textwrap
import zipfile
import pyarrow as pa
import pyarrow.parquet as pq
import pytest
import datasets
import datasets.config
@pytest.fixture(scope='session' )
def __lowerCAmelCase( ) -> Optional[Any]:
"""simple docstring"""
_A = 10
_A = datasets.Features(
{
'tokens': datasets.Sequence(datasets.Value('string' ) ),
'labels': datasets.Sequence(datasets.ClassLabel(names=['negative', 'positive'] ) ),
'answers': datasets.Sequence(
{
'text': datasets.Value('string' ),
'answer_start': datasets.Value('int32' ),
} ),
'id': datasets.Value('int64' ),
} )
_A = datasets.Dataset.from_dict(
{
'tokens': [['foo'] * 5] * n,
'labels': [[1] * 5] * n,
'answers': [{'answer_start': [97], 'text': ['1976']}] * 10,
'id': list(range(_SCREAMING_SNAKE_CASE ) ),
} , features=_SCREAMING_SNAKE_CASE , )
return dataset
@pytest.fixture(scope='session' )
def __lowerCAmelCase( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> List[str]:
"""simple docstring"""
_A = str(tmp_path_factory.mktemp('data' ) / 'file.arrow' )
dataset.map(cache_file_name=_SCREAMING_SNAKE_CASE )
return filename
# FILE_CONTENT + files
__A : Union[str, Any] = "\\n Text data.\n Second line of data."
@pytest.fixture(scope='session' )
def __lowerCAmelCase( _SCREAMING_SNAKE_CASE ) -> Union[str, Any]:
"""simple docstring"""
_A = tmp_path_factory.mktemp('data' ) / 'file.txt'
_A = FILE_CONTENT
with open(_SCREAMING_SNAKE_CASE , 'w' ) as f:
f.write(_SCREAMING_SNAKE_CASE )
return filename
@pytest.fixture(scope='session' )
def __lowerCAmelCase( _SCREAMING_SNAKE_CASE ) -> int:
"""simple docstring"""
import bza
_A = tmp_path_factory.mktemp('data' ) / 'file.txt.bz2'
_A = bytes(_SCREAMING_SNAKE_CASE , 'utf-8' )
with bza.open(_SCREAMING_SNAKE_CASE , 'wb' ) as f:
f.write(_SCREAMING_SNAKE_CASE )
return path
@pytest.fixture(scope='session' )
def __lowerCAmelCase( _SCREAMING_SNAKE_CASE ) -> Any:
"""simple docstring"""
import gzip
_A = str(tmp_path_factory.mktemp('data' ) / 'file.txt.gz' )
_A = bytes(_SCREAMING_SNAKE_CASE , 'utf-8' )
with gzip.open(_SCREAMING_SNAKE_CASE , 'wb' ) as f:
f.write(_SCREAMING_SNAKE_CASE )
return path
@pytest.fixture(scope='session' )
def __lowerCAmelCase( _SCREAMING_SNAKE_CASE ) -> List[Any]:
"""simple docstring"""
if datasets.config.LZ4_AVAILABLE:
import lza.frame
_A = tmp_path_factory.mktemp('data' ) / 'file.txt.lz4'
_A = bytes(_SCREAMING_SNAKE_CASE , 'utf-8' )
with lza.frame.open(_SCREAMING_SNAKE_CASE , 'wb' ) as f:
f.write(_SCREAMING_SNAKE_CASE )
return path
@pytest.fixture(scope='session' )
def __lowerCAmelCase( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Tuple:
"""simple docstring"""
if datasets.config.PY7ZR_AVAILABLE:
import pyazr
_A = tmp_path_factory.mktemp('data' ) / 'file.txt.7z'
with pyazr.SevenZipFile(_SCREAMING_SNAKE_CASE , 'w' ) as archive:
archive.write(_SCREAMING_SNAKE_CASE , arcname=os.path.basename(_SCREAMING_SNAKE_CASE ) )
return path
@pytest.fixture(scope='session' )
def __lowerCAmelCase( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Any:
"""simple docstring"""
import tarfile
_A = tmp_path_factory.mktemp('data' ) / 'file.txt.tar'
with tarfile.TarFile(_SCREAMING_SNAKE_CASE , 'w' ) as f:
f.add(_SCREAMING_SNAKE_CASE , arcname=os.path.basename(_SCREAMING_SNAKE_CASE ) )
return path
@pytest.fixture(scope='session' )
def __lowerCAmelCase( _SCREAMING_SNAKE_CASE ) -> List[Any]:
"""simple docstring"""
import lzma
_A = tmp_path_factory.mktemp('data' ) / 'file.txt.xz'
_A = bytes(_SCREAMING_SNAKE_CASE , 'utf-8' )
with lzma.open(_SCREAMING_SNAKE_CASE , 'wb' ) as f:
f.write(_SCREAMING_SNAKE_CASE )
return path
@pytest.fixture(scope='session' )
def __lowerCAmelCase( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Union[str, Any]:
"""simple docstring"""
import zipfile
_A = tmp_path_factory.mktemp('data' ) / 'file.txt.zip'
with zipfile.ZipFile(_SCREAMING_SNAKE_CASE , 'w' ) as f:
f.write(_SCREAMING_SNAKE_CASE , arcname=os.path.basename(_SCREAMING_SNAKE_CASE ) )
return path
@pytest.fixture(scope='session' )
def __lowerCAmelCase( _SCREAMING_SNAKE_CASE ) -> Optional[Any]:
"""simple docstring"""
if datasets.config.ZSTANDARD_AVAILABLE:
import zstandard as zstd
_A = tmp_path_factory.mktemp('data' ) / 'file.txt.zst'
_A = bytes(_SCREAMING_SNAKE_CASE , 'utf-8' )
with zstd.open(_SCREAMING_SNAKE_CASE , 'wb' ) as f:
f.write(_SCREAMING_SNAKE_CASE )
return path
@pytest.fixture(scope='session' )
def __lowerCAmelCase( _SCREAMING_SNAKE_CASE ) -> List[str]:
"""simple docstring"""
_A = tmp_path_factory.mktemp('data' ) / 'file.xml'
_A = textwrap.dedent(
'\\n <?xml version="1.0" encoding="UTF-8" ?>\n <tmx version="1.4">\n <header segtype="sentence" srclang="ca" />\n <body>\n <tu>\n <tuv xml:lang="ca"><seg>Contingut 1</seg></tuv>\n <tuv xml:lang="en"><seg>Content 1</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang="ca"><seg>Contingut 2</seg></tuv>\n <tuv xml:lang="en"><seg>Content 2</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang="ca"><seg>Contingut 3</seg></tuv>\n <tuv xml:lang="en"><seg>Content 3</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang="ca"><seg>Contingut 4</seg></tuv>\n <tuv xml:lang="en"><seg>Content 4</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang="ca"><seg>Contingut 5</seg></tuv>\n <tuv xml:lang="en"><seg>Content 5</seg></tuv>\n </tu>\n </body>\n </tmx>' )
with open(_SCREAMING_SNAKE_CASE , 'w' ) as f:
f.write(_SCREAMING_SNAKE_CASE )
return filename
__A : Optional[int] = [
{"col_1": "0", "col_2": 0, "col_3": 0.0},
{"col_1": "1", "col_2": 1, "col_3": 1.0},
{"col_1": "2", "col_2": 2, "col_3": 2.0},
{"col_1": "3", "col_2": 3, "col_3": 3.0},
]
__A : Optional[Any] = [
{"col_1": "4", "col_2": 4, "col_3": 4.0},
{"col_1": "5", "col_2": 5, "col_3": 5.0},
]
__A : Any = {
"col_1": ["0", "1", "2", "3"],
"col_2": [0, 1, 2, 3],
"col_3": [0.0, 1.0, 2.0, 3.0],
}
__A : Optional[Any] = [
{"col_3": 0.0, "col_1": "0", "col_2": 0},
{"col_3": 1.0, "col_1": "1", "col_2": 1},
]
__A : str = [
{"col_1": "s0", "col_2": 0, "col_3": 0.0},
{"col_1": "s1", "col_2": 1, "col_3": 1.0},
{"col_1": "s2", "col_2": 2, "col_3": 2.0},
{"col_1": "s3", "col_2": 3, "col_3": 3.0},
]
@pytest.fixture(scope='session' )
def __lowerCAmelCase( ) -> List[str]:
"""simple docstring"""
return DATA_DICT_OF_LISTS
@pytest.fixture(scope='session' )
def __lowerCAmelCase( _SCREAMING_SNAKE_CASE ) -> Union[str, Any]:
"""simple docstring"""
_A = datasets.Dataset.from_dict(_SCREAMING_SNAKE_CASE )
_A = str(tmp_path_factory.mktemp('data' ) / 'dataset.arrow' )
dataset.map(cache_file_name=_SCREAMING_SNAKE_CASE )
return path
@pytest.fixture(scope='session' )
def __lowerCAmelCase( _SCREAMING_SNAKE_CASE ) -> List[str]:
"""simple docstring"""
_A = str(tmp_path_factory.mktemp('data' ) / 'dataset.sqlite' )
with contextlib.closing(sqlitea.connect(_SCREAMING_SNAKE_CASE ) ) as con:
_A = con.cursor()
cur.execute('CREATE TABLE dataset(col_1 text, col_2 int, col_3 real)' )
for item in DATA:
cur.execute('INSERT INTO dataset(col_1, col_2, col_3) VALUES (?, ?, ?)' , tuple(item.values() ) )
con.commit()
return path
@pytest.fixture(scope='session' )
def __lowerCAmelCase( _SCREAMING_SNAKE_CASE ) -> Optional[int]:
"""simple docstring"""
_A = str(tmp_path_factory.mktemp('data' ) / 'dataset.csv' )
with open(_SCREAMING_SNAKE_CASE , 'w' , newline='' ) as f:
_A = csv.DictWriter(_SCREAMING_SNAKE_CASE , fieldnames=['col_1', 'col_2', 'col_3'] )
writer.writeheader()
for item in DATA:
writer.writerow(_SCREAMING_SNAKE_CASE )
return path
@pytest.fixture(scope='session' )
def __lowerCAmelCase( _SCREAMING_SNAKE_CASE ) -> Dict:
"""simple docstring"""
_A = str(tmp_path_factory.mktemp('data' ) / 'dataset2.csv' )
with open(_SCREAMING_SNAKE_CASE , 'w' , newline='' ) as f:
_A = csv.DictWriter(_SCREAMING_SNAKE_CASE , fieldnames=['col_1', 'col_2', 'col_3'] )
writer.writeheader()
for item in DATA:
writer.writerow(_SCREAMING_SNAKE_CASE )
return path
@pytest.fixture(scope='session' )
def __lowerCAmelCase( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Tuple:
"""simple docstring"""
import bza
_A = tmp_path_factory.mktemp('data' ) / 'dataset.csv.bz2'
with open(_SCREAMING_SNAKE_CASE , 'rb' ) as f:
_A = f.read()
# data = bytes(FILE_CONTENT, "utf-8")
with bza.open(_SCREAMING_SNAKE_CASE , 'wb' ) as f:
f.write(_SCREAMING_SNAKE_CASE )
return path
@pytest.fixture(scope='session' )
def __lowerCAmelCase( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> int:
"""simple docstring"""
_A = tmp_path_factory.mktemp('data' ) / 'dataset.csv.zip'
with zipfile.ZipFile(_SCREAMING_SNAKE_CASE , 'w' ) as f:
f.write(_SCREAMING_SNAKE_CASE , arcname=os.path.basename(_SCREAMING_SNAKE_CASE ) )
f.write(_SCREAMING_SNAKE_CASE , arcname=os.path.basename(_SCREAMING_SNAKE_CASE ) )
return path
@pytest.fixture(scope='session' )
def __lowerCAmelCase( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> int:
"""simple docstring"""
_A = tmp_path_factory.mktemp('data' ) / 'dataset.csv.zip'
with zipfile.ZipFile(_SCREAMING_SNAKE_CASE , 'w' ) as f:
f.write(_SCREAMING_SNAKE_CASE , arcname=os.path.basename(csv_path.replace('.csv' , '.CSV' ) ) )
f.write(_SCREAMING_SNAKE_CASE , arcname=os.path.basename(csva_path.replace('.csv' , '.CSV' ) ) )
return path
@pytest.fixture(scope='session' )
def __lowerCAmelCase( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> List[Any]:
"""simple docstring"""
_A = tmp_path_factory.mktemp('data' ) / 'dataset_with_dir.csv.zip'
with zipfile.ZipFile(_SCREAMING_SNAKE_CASE , 'w' ) as f:
f.write(_SCREAMING_SNAKE_CASE , arcname=os.path.join('main_dir' , os.path.basename(_SCREAMING_SNAKE_CASE ) ) )
f.write(_SCREAMING_SNAKE_CASE , arcname=os.path.join('main_dir' , os.path.basename(_SCREAMING_SNAKE_CASE ) ) )
return path
@pytest.fixture(scope='session' )
def __lowerCAmelCase( _SCREAMING_SNAKE_CASE ) -> Union[str, Any]:
"""simple docstring"""
_A = str(tmp_path_factory.mktemp('data' ) / 'dataset.parquet' )
_A = pa.schema(
{
'col_1': pa.string(),
'col_2': pa.intaa(),
'col_3': pa.floataa(),
} )
with open(_SCREAMING_SNAKE_CASE , 'wb' ) as f:
_A = pq.ParquetWriter(_SCREAMING_SNAKE_CASE , schema=_SCREAMING_SNAKE_CASE )
_A = pa.Table.from_pydict({k: [DATA[i][k] for i in range(len(_SCREAMING_SNAKE_CASE ) )] for k in DATA[0]} , schema=_SCREAMING_SNAKE_CASE )
writer.write_table(_SCREAMING_SNAKE_CASE )
writer.close()
return path
@pytest.fixture(scope='session' )
def __lowerCAmelCase( _SCREAMING_SNAKE_CASE ) -> str:
"""simple docstring"""
_A = str(tmp_path_factory.mktemp('data' ) / 'dataset.json' )
_A = {'data': DATA}
with open(_SCREAMING_SNAKE_CASE , 'w' ) as f:
json.dump(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
return path
@pytest.fixture(scope='session' )
def __lowerCAmelCase( _SCREAMING_SNAKE_CASE ) -> Dict:
"""simple docstring"""
_A = str(tmp_path_factory.mktemp('data' ) / 'dataset.json' )
_A = {'data': DATA_DICT_OF_LISTS}
with open(_SCREAMING_SNAKE_CASE , 'w' ) as f:
json.dump(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
return path
@pytest.fixture(scope='session' )
def __lowerCAmelCase( _SCREAMING_SNAKE_CASE ) -> Any:
"""simple docstring"""
_A = str(tmp_path_factory.mktemp('data' ) / 'dataset.jsonl' )
with open(_SCREAMING_SNAKE_CASE , 'w' ) as f:
for item in DATA:
f.write(json.dumps(_SCREAMING_SNAKE_CASE ) + '\n' )
return path
@pytest.fixture(scope='session' )
def __lowerCAmelCase( _SCREAMING_SNAKE_CASE ) -> List[Any]:
"""simple docstring"""
_A = str(tmp_path_factory.mktemp('data' ) / 'dataset2.jsonl' )
with open(_SCREAMING_SNAKE_CASE , 'w' ) as f:
for item in DATA:
f.write(json.dumps(_SCREAMING_SNAKE_CASE ) + '\n' )
return path
@pytest.fixture(scope='session' )
def __lowerCAmelCase( _SCREAMING_SNAKE_CASE ) -> Tuple:
"""simple docstring"""
_A = str(tmp_path_factory.mktemp('data' ) / 'dataset_312.jsonl' )
with open(_SCREAMING_SNAKE_CASE , 'w' ) as f:
for item in DATA_312:
f.write(json.dumps(_SCREAMING_SNAKE_CASE ) + '\n' )
return path
@pytest.fixture(scope='session' )
def __lowerCAmelCase( _SCREAMING_SNAKE_CASE ) -> Union[str, Any]:
"""simple docstring"""
_A = str(tmp_path_factory.mktemp('data' ) / 'dataset-str.jsonl' )
with open(_SCREAMING_SNAKE_CASE , 'w' ) as f:
for item in DATA_STR:
f.write(json.dumps(_SCREAMING_SNAKE_CASE ) + '\n' )
return path
@pytest.fixture(scope='session' )
def __lowerCAmelCase( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Union[str, Any]:
"""simple docstring"""
import gzip
_A = str(tmp_path_factory.mktemp('data' ) / 'dataset.txt.gz' )
with open(_SCREAMING_SNAKE_CASE , 'rb' ) as orig_file:
with gzip.open(_SCREAMING_SNAKE_CASE , 'wb' ) as zipped_file:
zipped_file.writelines(_SCREAMING_SNAKE_CASE )
return path
@pytest.fixture(scope='session' )
def __lowerCAmelCase( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> List[str]:
"""simple docstring"""
import gzip
_A = str(tmp_path_factory.mktemp('data' ) / 'dataset.jsonl.gz' )
with open(_SCREAMING_SNAKE_CASE , 'rb' ) as orig_file:
with gzip.open(_SCREAMING_SNAKE_CASE , 'wb' ) as zipped_file:
zipped_file.writelines(_SCREAMING_SNAKE_CASE )
return path
@pytest.fixture(scope='session' )
def __lowerCAmelCase( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Any:
"""simple docstring"""
_A = tmp_path_factory.mktemp('data' ) / 'dataset.jsonl.zip'
with zipfile.ZipFile(_SCREAMING_SNAKE_CASE , 'w' ) as f:
f.write(_SCREAMING_SNAKE_CASE , arcname=os.path.basename(_SCREAMING_SNAKE_CASE ) )
f.write(_SCREAMING_SNAKE_CASE , arcname=os.path.basename(_SCREAMING_SNAKE_CASE ) )
return path
@pytest.fixture(scope='session' )
def __lowerCAmelCase( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> List[Any]:
"""simple docstring"""
_A = tmp_path_factory.mktemp('data' ) / 'dataset_nested.jsonl.zip'
with zipfile.ZipFile(_SCREAMING_SNAKE_CASE , 'w' ) as f:
f.write(_SCREAMING_SNAKE_CASE , arcname=os.path.join('nested' , os.path.basename(_SCREAMING_SNAKE_CASE ) ) )
return path
@pytest.fixture(scope='session' )
def __lowerCAmelCase( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> List[Any]:
"""simple docstring"""
_A = tmp_path_factory.mktemp('data' ) / 'dataset_with_dir.jsonl.zip'
with zipfile.ZipFile(_SCREAMING_SNAKE_CASE , 'w' ) as f:
f.write(_SCREAMING_SNAKE_CASE , arcname=os.path.join('main_dir' , os.path.basename(_SCREAMING_SNAKE_CASE ) ) )
f.write(_SCREAMING_SNAKE_CASE , arcname=os.path.join('main_dir' , os.path.basename(_SCREAMING_SNAKE_CASE ) ) )
return path
@pytest.fixture(scope='session' )
def __lowerCAmelCase( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> List[Any]:
"""simple docstring"""
_A = tmp_path_factory.mktemp('data' ) / 'dataset.jsonl.tar'
with tarfile.TarFile(_SCREAMING_SNAKE_CASE , 'w' ) as f:
f.add(_SCREAMING_SNAKE_CASE , arcname=os.path.basename(_SCREAMING_SNAKE_CASE ) )
f.add(_SCREAMING_SNAKE_CASE , arcname=os.path.basename(_SCREAMING_SNAKE_CASE ) )
return path
@pytest.fixture(scope='session' )
def __lowerCAmelCase( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> int:
"""simple docstring"""
_A = tmp_path_factory.mktemp('data' ) / 'dataset_nested.jsonl.tar'
with tarfile.TarFile(_SCREAMING_SNAKE_CASE , 'w' ) as f:
f.add(_SCREAMING_SNAKE_CASE , arcname=os.path.join('nested' , os.path.basename(_SCREAMING_SNAKE_CASE ) ) )
return path
@pytest.fixture(scope='session' )
def __lowerCAmelCase( _SCREAMING_SNAKE_CASE ) -> List[Any]:
"""simple docstring"""
_A = ['0', '1', '2', '3']
_A = str(tmp_path_factory.mktemp('data' ) / 'dataset.txt' )
with open(_SCREAMING_SNAKE_CASE , 'w' ) as f:
for item in data:
f.write(item + '\n' )
return path
@pytest.fixture(scope='session' )
def __lowerCAmelCase( _SCREAMING_SNAKE_CASE ) -> str:
"""simple docstring"""
_A = ['0', '1', '2', '3']
_A = str(tmp_path_factory.mktemp('data' ) / 'dataset2.txt' )
with open(_SCREAMING_SNAKE_CASE , 'w' ) as f:
for item in data:
f.write(item + '\n' )
return path
@pytest.fixture(scope='session' )
def __lowerCAmelCase( _SCREAMING_SNAKE_CASE ) -> Any:
"""simple docstring"""
_A = ['0', '1', '2', '3']
_A = tmp_path_factory.mktemp('data' ) / 'dataset.abc'
with open(_SCREAMING_SNAKE_CASE , 'w' ) as f:
for item in data:
f.write(item + '\n' )
return path
@pytest.fixture(scope='session' )
def __lowerCAmelCase( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Tuple:
"""simple docstring"""
_A = tmp_path_factory.mktemp('data' ) / 'dataset.text.zip'
with zipfile.ZipFile(_SCREAMING_SNAKE_CASE , 'w' ) as f:
f.write(_SCREAMING_SNAKE_CASE , arcname=os.path.basename(_SCREAMING_SNAKE_CASE ) )
f.write(_SCREAMING_SNAKE_CASE , arcname=os.path.basename(_SCREAMING_SNAKE_CASE ) )
return path
@pytest.fixture(scope='session' )
def __lowerCAmelCase( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Union[str, Any]:
"""simple docstring"""
_A = tmp_path_factory.mktemp('data' ) / 'dataset_with_dir.text.zip'
with zipfile.ZipFile(_SCREAMING_SNAKE_CASE , 'w' ) as f:
f.write(_SCREAMING_SNAKE_CASE , arcname=os.path.join('main_dir' , os.path.basename(_SCREAMING_SNAKE_CASE ) ) )
f.write(_SCREAMING_SNAKE_CASE , arcname=os.path.join('main_dir' , os.path.basename(_SCREAMING_SNAKE_CASE ) ) )
return path
@pytest.fixture(scope='session' )
def __lowerCAmelCase( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> List[Any]:
"""simple docstring"""
_A = tmp_path_factory.mktemp('data' ) / 'dataset.ext.zip'
with zipfile.ZipFile(_SCREAMING_SNAKE_CASE , 'w' ) as f:
f.write(_SCREAMING_SNAKE_CASE , arcname=os.path.basename('unsupported.ext' ) )
f.write(_SCREAMING_SNAKE_CASE , arcname=os.path.basename('unsupported_2.ext' ) )
return path
@pytest.fixture(scope='session' )
def __lowerCAmelCase( _SCREAMING_SNAKE_CASE ) -> List[Any]:
"""simple docstring"""
_A = '\n'.join(['First', 'Second\u2029with Unicode new line', 'Third'] )
_A = str(tmp_path_factory.mktemp('data' ) / 'dataset_with_unicode_new_lines.txt' )
with open(_SCREAMING_SNAKE_CASE , 'w' , encoding='utf-8' ) as f:
f.write(_SCREAMING_SNAKE_CASE )
return path
@pytest.fixture(scope='session' )
def __lowerCAmelCase( ) -> Dict:
"""simple docstring"""
return os.path.join('tests' , 'features' , 'data' , 'test_image_rgb.jpg' )
@pytest.fixture(scope='session' )
def __lowerCAmelCase( ) -> Union[str, Any]:
"""simple docstring"""
return os.path.join('tests' , 'features' , 'data' , 'test_audio_44100.wav' )
@pytest.fixture(scope='session' )
def __lowerCAmelCase( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Optional[int]:
"""simple docstring"""
_A = tmp_path_factory.mktemp('data' ) / 'dataset.img.zip'
with zipfile.ZipFile(_SCREAMING_SNAKE_CASE , 'w' ) as f:
f.write(_SCREAMING_SNAKE_CASE , arcname=os.path.basename(_SCREAMING_SNAKE_CASE ) )
f.write(_SCREAMING_SNAKE_CASE , arcname=os.path.basename(_SCREAMING_SNAKE_CASE ).replace('.jpg' , '2.jpg' ) )
return path
@pytest.fixture(scope='session' )
def __lowerCAmelCase( _SCREAMING_SNAKE_CASE ) -> Optional[int]:
"""simple docstring"""
_A = tmp_path_factory.mktemp('data_dir' )
(data_dir / "subdir").mkdir()
with open(data_dir / 'subdir' / 'train.txt' , 'w' ) as f:
f.write('foo\n' * 10 )
with open(data_dir / 'subdir' / 'test.txt' , 'w' ) as f:
f.write('bar\n' * 10 )
# hidden file
with open(data_dir / 'subdir' / '.test.txt' , 'w' ) as f:
f.write('bar\n' * 10 )
# hidden directory
(data_dir / ".subdir").mkdir()
with open(data_dir / '.subdir' / 'train.txt' , 'w' ) as f:
f.write('foo\n' * 10 )
with open(data_dir / '.subdir' / 'test.txt' , 'w' ) as f:
f.write('bar\n' * 10 )
return data_dir
| 27
|
"""simple docstring"""
import inspect
import os
import re
from transformers.configuration_utils import PretrainedConfig
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
SCREAMING_SNAKE_CASE = """src/transformers"""
# This is to make sure the transformers module imported is the one in the repo.
SCREAMING_SNAKE_CASE = direct_transformers_import(PATH_TO_TRANSFORMERS)
SCREAMING_SNAKE_CASE = transformers.models.auto.configuration_auto.CONFIG_MAPPING
SCREAMING_SNAKE_CASE = {
# used to compute the property `self.chunk_length`
"""EncodecConfig""": ["""overlap"""],
# used as `self.bert_model = BertModel(config, ...)`
"""DPRConfig""": True,
# not used in modeling files, but it's an important information
"""FSMTConfig""": ["""langs"""],
# used internally in the configuration class file
"""GPTNeoConfig""": ["""attention_types"""],
# used internally in the configuration class file
"""EsmConfig""": ["""is_folding_model"""],
# used during training (despite we don't have training script for these models yet)
"""Mask2FormerConfig""": ["""ignore_value"""],
# `ignore_value` used during training (despite we don't have training script for these models yet)
# `norm` used in conversion script (despite not using in the modeling file)
"""OneFormerConfig""": ["""ignore_value""", """norm"""],
# used during preprocessing and collation, see `collating_graphormer.py`
"""GraphormerConfig""": ["""spatial_pos_max"""],
# used internally in the configuration class file
"""T5Config""": ["""feed_forward_proj"""],
# used internally in the configuration class file
# `tokenizer_class` get default value `T5Tokenizer` intentionally
"""MT5Config""": ["""feed_forward_proj""", """tokenizer_class"""],
"""UMT5Config""": ["""feed_forward_proj""", """tokenizer_class"""],
# used internally in the configuration class file
"""LongT5Config""": ["""feed_forward_proj"""],
# used internally in the configuration class file
"""SwitchTransformersConfig""": ["""feed_forward_proj"""],
# having default values other than `1e-5` - we can't fix them without breaking
"""BioGptConfig""": ["""layer_norm_eps"""],
# having default values other than `1e-5` - we can't fix them without breaking
"""GLPNConfig""": ["""layer_norm_eps"""],
# having default values other than `1e-5` - we can't fix them without breaking
"""SegformerConfig""": ["""layer_norm_eps"""],
# having default values other than `1e-5` - we can't fix them without breaking
"""CvtConfig""": ["""layer_norm_eps"""],
# having default values other than `1e-5` - we can't fix them without breaking
"""PerceiverConfig""": ["""layer_norm_eps"""],
# used internally to calculate the feature size
"""InformerConfig""": ["""num_static_real_features""", """num_time_features"""],
# used internally to calculate the feature size
"""TimeSeriesTransformerConfig""": ["""num_static_real_features""", """num_time_features"""],
# used internally to calculate the feature size
"""AutoformerConfig""": ["""num_static_real_features""", """num_time_features"""],
# used internally to calculate `mlp_dim`
"""SamVisionConfig""": ["""mlp_ratio"""],
# For (head) training, but so far not implemented
"""ClapAudioConfig""": ["""num_classes"""],
# Not used, but providing useful information to users
"""SpeechT5HifiGanConfig""": ["""sampling_rate"""],
}
# TODO (ydshieh): Check the failing cases, try to fix them or move some cases to the above block once we are sure
SPECIAL_CASES_TO_ALLOW.update(
{
"""CLIPSegConfig""": True,
"""DeformableDetrConfig""": True,
"""DetaConfig""": True,
"""DinatConfig""": True,
"""DonutSwinConfig""": True,
"""EfficientFormerConfig""": True,
"""FSMTConfig""": True,
"""JukeboxConfig""": True,
"""LayoutLMv2Config""": True,
"""MaskFormerSwinConfig""": True,
"""MT5Config""": True,
"""NatConfig""": True,
"""OneFormerConfig""": True,
"""PerceiverConfig""": True,
"""RagConfig""": True,
"""SpeechT5Config""": True,
"""SwinConfig""": True,
"""Swin2SRConfig""": True,
"""Swinv2Config""": True,
"""SwitchTransformersConfig""": True,
"""TableTransformerConfig""": True,
"""TapasConfig""": True,
"""TransfoXLConfig""": True,
"""UniSpeechConfig""": True,
"""UniSpeechSatConfig""": True,
"""WavLMConfig""": True,
"""WhisperConfig""": True,
# TODO: @Arthur (for `alignment_head` and `alignment_layer`)
"""JukeboxPriorConfig""": True,
# TODO: @Younes (for `is_decoder`)
"""Pix2StructTextConfig""": True,
}
)
def lowerCamelCase__ ( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )-> Optional[Any]:
"""simple docstring"""
UpperCamelCase = False
for attribute in attributes:
for modeling_source in source_strings:
# check if we can find `config.xxx`, `getattr(config, "xxx", ...)` or `getattr(self.config, "xxx", ...)`
if (
F"config.{attribute}" in modeling_source
or F"getattr(config, \"{attribute}\"" in modeling_source
or F"getattr(self.config, \"{attribute}\"" in modeling_source
):
UpperCamelCase = True
# Deal with multi-line cases
elif (
re.search(
RF"getattr[ \t\v\n\r\f]*\([ \t\v\n\r\f]*(self\.)?config,[ \t\v\n\r\f]*\"{attribute}\"" , UpperCAmelCase_ , )
is not None
):
UpperCamelCase = True
# `SequenceSummary` is called with `SequenceSummary(config)`
elif attribute in [
"summary_type",
"summary_use_proj",
"summary_activation",
"summary_last_dropout",
"summary_proj_to_labels",
"summary_first_dropout",
]:
if "SequenceSummary" in modeling_source:
UpperCamelCase = True
if attribute_used:
break
if attribute_used:
break
# common and important attributes, even if they do not always appear in the modeling files
UpperCamelCase = [
"bos_index",
"eos_index",
"pad_index",
"unk_index",
"mask_index",
"image_size",
"use_cache",
"out_features",
"out_indices",
]
UpperCamelCase = ["encoder_no_repeat_ngram_size"]
# Special cases to be allowed
UpperCamelCase = True
if not attribute_used:
UpperCamelCase = False
for attribute in attributes:
# Allow if the default value in the configuration class is different from the one in `PretrainedConfig`
if attribute in ["is_encoder_decoder"] and default_value is True:
UpperCamelCase = True
elif attribute in ["tie_word_embeddings"] and default_value is False:
UpperCamelCase = True
# Allow cases without checking the default value in the configuration class
elif attribute in attributes_to_allow + attributes_used_in_generation:
UpperCamelCase = True
elif attribute.endswith("_token_id" ):
UpperCamelCase = True
# configuration class specific cases
if not case_allowed:
UpperCamelCase = SPECIAL_CASES_TO_ALLOW.get(config_class.__name__ , [] )
UpperCamelCase = allowed_cases is True or attribute in allowed_cases
return attribute_used or case_allowed
def lowerCamelCase__ ( UpperCAmelCase_ )-> Optional[Any]:
"""simple docstring"""
UpperCamelCase = dict(inspect.signature(config_class.__init__ ).parameters )
UpperCamelCase = [x for x in list(signature.keys() ) if x not in ["self", "kwargs"]]
UpperCamelCase = [signature[param].default for param in parameter_names]
# If `attribute_map` exists, an attribute can have different names to be used in the modeling files, and as long
# as one variant is used, the test should pass
UpperCamelCase = {}
if len(config_class.attribute_map ) > 0:
UpperCamelCase = {v: k for k, v in config_class.attribute_map.items()}
# Get the path to modeling source files
UpperCamelCase = inspect.getsourcefile(UpperCAmelCase_ )
UpperCamelCase = os.path.dirname(UpperCAmelCase_ )
# Let's check against all frameworks: as long as one framework uses an attribute, we are good.
UpperCamelCase = [os.path.join(UpperCAmelCase_ , UpperCAmelCase_ ) for fn in os.listdir(UpperCAmelCase_ ) if fn.startswith("modeling_" )]
# Get the source code strings
UpperCamelCase = []
for path in modeling_paths:
if os.path.isfile(UpperCAmelCase_ ):
with open(UpperCAmelCase_ ) as fp:
modeling_sources.append(fp.read() )
UpperCamelCase = []
for config_param, default_value in zip(UpperCAmelCase_ , UpperCAmelCase_ ):
# `attributes` here is all the variant names for `config_param`
UpperCamelCase = [config_param]
# some configuration classes have non-empty `attribute_map`, and both names could be used in the
# corresponding modeling files. As long as one of them appears, it is fine.
if config_param in reversed_attribute_map:
attributes.append(reversed_attribute_map[config_param] )
if not check_attribute_being_used(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ):
unused_attributes.append(attributes[0] )
return sorted(UpperCAmelCase_ )
def lowerCamelCase__ ( )-> List[str]:
"""simple docstring"""
UpperCamelCase = {}
for _config_class in list(CONFIG_MAPPING.values() ):
# Skip deprecated models
if "models.deprecated" in _config_class.__module__:
continue
# Some config classes are not in `CONFIG_MAPPING` (e.g. `CLIPVisionConfig`, `Blip2VisionConfig`, etc.)
UpperCamelCase = [
cls
for name, cls in inspect.getmembers(
inspect.getmodule(_config_class ) , lambda UpperCAmelCase_ : inspect.isclass(UpperCAmelCase_ )
and issubclass(UpperCAmelCase_ , UpperCAmelCase_ )
and inspect.getmodule(UpperCAmelCase_ ) == inspect.getmodule(_config_class ) , )
]
for config_class in config_classes_in_module:
UpperCamelCase = check_config_attributes_being_used(UpperCAmelCase_ )
if len(UpperCAmelCase_ ) > 0:
UpperCamelCase = unused_attributes
if len(UpperCAmelCase_ ) > 0:
UpperCamelCase = "The following configuration classes contain unused attributes in the corresponding modeling files:\n"
for name, attributes in configs_with_unused_attributes.items():
error += F"{name}: {attributes}\n"
raise ValueError(UpperCAmelCase_ )
if __name__ == "__main__":
check_config_attributes()
| 554
| 0
|
def a__ ( snake_case ) -> bool:
"""simple docstring"""
if not isinstance(__lowerCAmelCase , __lowerCAmelCase ):
raise ValueError('''Input series is not valid, valid series - [2, 4, 6]''' )
if len(__lowerCAmelCase ) == 0:
raise ValueError('''Input list must be a non empty list''' )
if len(__lowerCAmelCase ) == 1:
return True
__SCREAMING_SNAKE_CASE : Dict = series[1] - series[0]
for index in range(len(__lowerCAmelCase ) - 1 ):
if series[index + 1] - series[index] != common_diff:
return False
return True
def a__ ( snake_case ) -> float:
"""simple docstring"""
if not isinstance(__lowerCAmelCase , __lowerCAmelCase ):
raise ValueError('''Input series is not valid, valid series - [2, 4, 6]''' )
if len(__lowerCAmelCase ) == 0:
raise ValueError('''Input list must be a non empty list''' )
__SCREAMING_SNAKE_CASE : Union[str, Any] = 0
for val in series:
answer += val
return answer / len(__lowerCAmelCase )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 712
|
from math import pi, sqrt
def a__ ( snake_case ):
"""simple docstring"""
if num <= 0:
raise ValueError('''math domain error''' )
if num > 171.5:
raise OverflowError('''math range error''' )
elif num - int(snake_case ) not in (0, 0.5):
raise NotImplementedError('''num must be an integer or a half-integer''' )
elif num == 0.5:
return sqrt(snake_case )
else:
return 1.0 if num == 1 else (num - 1) * gamma(num - 1 )
def a__ ( ):
"""simple docstring"""
assert gamma(0.5 ) == sqrt(snake_case )
assert gamma(1 ) == 1.0
assert gamma(2 ) == 1.0
if __name__ == "__main__":
from doctest import testmod
testmod()
lowercase_ = 1.0
while num:
lowercase_ = float(input("""Gamma of: """))
print(f'''gamma({num}) = {gamma(num)}''')
print("""\nEnter 0 to exit...""")
| 131
| 0
|
"""simple docstring"""
import os
from math import logaa
def lowercase__ ( snake_case_ :str = "base_exp.txt" ):
__UpperCAmelCase = 0
__UpperCAmelCase = 0
for i, line in enumerate(open(os.path.join(os.path.dirname(snake_case_ ) , snake_case_ ) ) ):
__UpperCAmelCase , __UpperCAmelCase = list(map(snake_case_ , line.split(''',''' ) ) )
if x * logaa(snake_case_ ) > largest:
__UpperCAmelCase = x * logaa(snake_case_ )
__UpperCAmelCase = i + 1
return result
if __name__ == "__main__":
print(solution())
| 49
|
"""simple docstring"""
from collections import deque
class _UpperCAmelCase :
def __init__( self : List[Any] , _lowercase : str , _lowercase : int , _lowercase : int ):
__UpperCAmelCase = process_name # process name
__UpperCAmelCase = arrival_time # arrival time of the process
# completion time of finished process or last interrupted time
__UpperCAmelCase = arrival_time
__UpperCAmelCase = burst_time # remaining burst time
__UpperCAmelCase = 0 # total time of the process wait in ready queue
__UpperCAmelCase = 0 # time from arrival time to completion time
class _UpperCAmelCase :
def __init__( self : List[str] , _lowercase : int , _lowercase : list[int] , _lowercase : deque[Process] , _lowercase : int , ):
# total number of mlfq's queues
__UpperCAmelCase = number_of_queues
# time slice of queues that round robin algorithm applied
__UpperCAmelCase = time_slices
# unfinished process is in this ready_queue
__UpperCAmelCase = queue
# current time
__UpperCAmelCase = current_time
# finished process is in this sequence queue
__UpperCAmelCase = deque()
def a ( self : Dict ):
__UpperCAmelCase = []
for i in range(len(self.finish_queue ) ):
sequence.append(self.finish_queue[i].process_name )
return sequence
def a ( self : str , _lowercase : list[Process] ):
__UpperCAmelCase = []
for i in range(len(_lowercase ) ):
waiting_times.append(queue[i].waiting_time )
return waiting_times
def a ( self : Any , _lowercase : list[Process] ):
__UpperCAmelCase = []
for i in range(len(_lowercase ) ):
turnaround_times.append(queue[i].turnaround_time )
return turnaround_times
def a ( self : Tuple , _lowercase : list[Process] ):
__UpperCAmelCase = []
for i in range(len(_lowercase ) ):
completion_times.append(queue[i].stop_time )
return completion_times
def a ( self : Optional[int] , _lowercase : deque[Process] ):
return [q.burst_time for q in queue]
def a ( self : str , _lowercase : Process ):
process.waiting_time += self.current_time - process.stop_time
return process.waiting_time
def a ( self : Union[str, Any] , _lowercase : deque[Process] ):
__UpperCAmelCase = deque() # sequence deque of finished process
while len(_lowercase ) != 0:
__UpperCAmelCase = ready_queue.popleft() # current process
# if process's arrival time is later than current time, update current time
if self.current_time < cp.arrival_time:
self.current_time += cp.arrival_time
# update waiting time of current process
self.update_waiting_time(_lowercase )
# update current time
self.current_time += cp.burst_time
# finish the process and set the process's burst-time 0
__UpperCAmelCase = 0
# set the process's turnaround time because it is finished
__UpperCAmelCase = self.current_time - cp.arrival_time
# set the completion time
__UpperCAmelCase = self.current_time
# add the process to queue that has finished queue
finished.append(_lowercase )
self.finish_queue.extend(_lowercase ) # add finished process to finish queue
# FCFS will finish all remaining processes
return finished
def a ( self : Union[str, Any] , _lowercase : deque[Process] , _lowercase : int ):
__UpperCAmelCase = deque() # sequence deque of terminated process
# just for 1 cycle and unfinished processes will go back to queue
for _ in range(len(_lowercase ) ):
__UpperCAmelCase = ready_queue.popleft() # current process
# if process's arrival time is later than current time, update current time
if self.current_time < cp.arrival_time:
self.current_time += cp.arrival_time
# update waiting time of unfinished processes
self.update_waiting_time(_lowercase )
# if the burst time of process is bigger than time-slice
if cp.burst_time > time_slice:
# use CPU for only time-slice
self.current_time += time_slice
# update remaining burst time
cp.burst_time -= time_slice
# update end point time
__UpperCAmelCase = self.current_time
# locate the process behind the queue because it is not finished
ready_queue.append(_lowercase )
else:
# use CPU for remaining burst time
self.current_time += cp.burst_time
# set burst time 0 because the process is finished
__UpperCAmelCase = 0
# set the finish time
__UpperCAmelCase = self.current_time
# update the process' turnaround time because it is finished
__UpperCAmelCase = self.current_time - cp.arrival_time
# add the process to queue that has finished queue
finished.append(_lowercase )
self.finish_queue.extend(_lowercase ) # add finished process to finish queue
# return finished processes queue and remaining processes queue
return finished, ready_queue
def a ( self : Union[str, Any] ):
# all queues except last one have round_robin algorithm
for i in range(self.number_of_queues - 1 ):
__UpperCAmelCase , __UpperCAmelCase = self.round_robin(
self.ready_queue , self.time_slices[i] )
# the last queue has first_come_first_served algorithm
self.first_come_first_served(self.ready_queue )
return self.finish_queue
if __name__ == "__main__":
import doctest
_lowercase : List[str] = Process('P1', 0, 53)
_lowercase : str = Process('P2', 0, 17)
_lowercase : Union[str, Any] = Process('P3', 0, 68)
_lowercase : int = Process('P4', 0, 24)
_lowercase : Any = 3
_lowercase : Union[str, Any] = [17, 25]
_lowercase : Dict = deque([Pa, Pa, Pa, Pa])
if len(time_slices) != number_of_queues - 1:
raise SystemExit(0)
doctest.testmod(extraglobs={'queue': deque([Pa, Pa, Pa, Pa])})
_lowercase : Optional[Any] = Process('P1', 0, 53)
_lowercase : Tuple = Process('P2', 0, 17)
_lowercase : Optional[int] = Process('P3', 0, 68)
_lowercase : int = Process('P4', 0, 24)
_lowercase : int = 3
_lowercase : int = [17, 25]
_lowercase : List[str] = deque([Pa, Pa, Pa, Pa])
_lowercase : List[Any] = MLFQ(number_of_queues, time_slices, queue, 0)
_lowercase : str = mlfq.multi_level_feedback_queue()
# print total waiting times of processes(P1, P2, P3, P4)
print(
f"""waiting time:\
\t\t\t{MLFQ.calculate_waiting_time(mlfq, [Pa, Pa, Pa, Pa])}"""
)
# print completion times of processes(P1, P2, P3, P4)
print(
f"""completion time:\
\t\t{MLFQ.calculate_completion_time(mlfq, [Pa, Pa, Pa, Pa])}"""
)
# print total turnaround times of processes(P1, P2, P3, P4)
print(
f"""turnaround time:\
\t\t{MLFQ.calculate_turnaround_time(mlfq, [Pa, Pa, Pa, Pa])}"""
)
# print sequence of finished processes
print(
f"""sequence of finished processes:\
{mlfq.calculate_sequence_of_finish_queue()}"""
)
| 49
| 1
|
"""simple docstring"""
def __lowerCamelCase ( SCREAMING_SNAKE_CASE,SCREAMING_SNAKE_CASE,SCREAMING_SNAKE_CASE,SCREAMING_SNAKE_CASE,SCREAMING_SNAKE_CASE,) -> float:
"""simple docstring"""
_UpperCAmelCase = [redshift, radiation_density, matter_density, dark_energy]
if any(p < 0 for p in parameters ):
raise ValueError('All input parameters must be positive' )
if any(p > 1 for p in parameters[1:4] ):
raise ValueError('Relative densities cannot be greater than one' )
else:
_UpperCAmelCase = 1 - (matter_density + radiation_density + dark_energy)
_UpperCAmelCase = (
radiation_density * (redshift + 1) ** 4
+ matter_density * (redshift + 1) ** 3
+ curvature * (redshift + 1) ** 2
+ dark_energy
)
_UpperCAmelCase = hubble_constant * e_a ** (1 / 2)
return hubble
if __name__ == "__main__":
import doctest
# run doctest
doctest.testmod()
# demo LCDM approximation
lowerCAmelCase_ = 0.3
print(
hubble_parameter(
hubble_constant=68.3,
radiation_density=1E-4,
matter_density=matter_density,
dark_energy=1 - matter_density,
redshift=0,
)
)
| 494
|
"""simple docstring"""
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
from ...utils import BaseOutput, OptionalDependencyNotAvailable, is_torch_available, is_transformers_available
from .timesteps import (
fastaa_timesteps,
smartaa_timesteps,
smartaa_timesteps,
smartaaa_timesteps,
smartaaa_timesteps,
superaa_timesteps,
superaa_timesteps,
superaaa_timesteps,
)
@dataclass
class lowerCAmelCase ( snake_case ):
lowerCAmelCase__ = 42
lowerCAmelCase__ = 42
lowerCAmelCase__ = 42
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipeline_if import IFPipeline
from .pipeline_if_imgaimg import IFImgaImgPipeline
from .pipeline_if_imgaimg_superresolution import IFImgaImgSuperResolutionPipeline
from .pipeline_if_inpainting import IFInpaintingPipeline
from .pipeline_if_inpainting_superresolution import IFInpaintingSuperResolutionPipeline
from .pipeline_if_superresolution import IFSuperResolutionPipeline
from .safety_checker import IFSafetyChecker
from .watermark import IFWatermarker
| 494
| 1
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = {
'weiweishi/roc-bert-base-zh': 'https://huggingface.co/weiweishi/roc-bert-base-zh/resolve/main/config.json',
}
class snake_case_ ( __UpperCamelCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = "roc_bert"
def __init__( self : List[str] , _UpperCamelCase : Optional[Any]=3_0_5_2_2 , _UpperCamelCase : Tuple=7_6_8 , _UpperCamelCase : Any=1_2 , _UpperCamelCase : Dict=1_2 , _UpperCamelCase : List[str]=3_0_7_2 , _UpperCamelCase : Dict="gelu" , _UpperCamelCase : str=0.1 , _UpperCamelCase : str=0.1 , _UpperCamelCase : Optional[int]=5_1_2 , _UpperCamelCase : str=2 , _UpperCamelCase : int=0.02 , _UpperCamelCase : Tuple=1e-12 , _UpperCamelCase : Union[str, Any]=True , _UpperCamelCase : List[str]=0 , _UpperCamelCase : Union[str, Any]="absolute" , _UpperCamelCase : Tuple=None , _UpperCamelCase : Optional[int]=True , _UpperCamelCase : int=True , _UpperCamelCase : Dict=7_6_8 , _UpperCamelCase : Optional[int]=9_1_0 , _UpperCamelCase : int=5_1_2 , _UpperCamelCase : Dict=2_4_8_5_8 , _UpperCamelCase : Optional[int]=True , **_UpperCamelCase : List[str] , ) ->Dict:
snake_case_ = vocab_size
snake_case_ = max_position_embeddings
snake_case_ = hidden_size
snake_case_ = num_hidden_layers
snake_case_ = num_attention_heads
snake_case_ = intermediate_size
snake_case_ = hidden_act
snake_case_ = hidden_dropout_prob
snake_case_ = attention_probs_dropout_prob
snake_case_ = initializer_range
snake_case_ = type_vocab_size
snake_case_ = layer_norm_eps
snake_case_ = use_cache
snake_case_ = enable_pronunciation
snake_case_ = enable_shape
snake_case_ = pronunciation_embed_dim
snake_case_ = pronunciation_vocab_size
snake_case_ = shape_embed_dim
snake_case_ = shape_vocab_size
snake_case_ = concat_input
snake_case_ = position_embedding_type
snake_case_ = classifier_dropout
super().__init__(pad_token_id=_UpperCamelCase , **_UpperCamelCase )
| 39
|
import warnings
from ...utils import logging
from .image_processing_deit import DeiTImageProcessor
_a : Optional[Any] = logging.get_logger(__name__)
class UpperCamelCase_ ( __UpperCamelCase ):
"""simple docstring"""
def __init__( self , *UpperCAmelCase , **UpperCAmelCase ):
warnings.warn(
"""The class DeiTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"""
""" use DeiTImageProcessor instead.""" , UpperCAmelCase , )
super().__init__(*UpperCAmelCase , **UpperCAmelCase )
| 479
| 0
|
import torch
import torch.nn as nn
from transformers import CLIPConfig, CLIPVisionModel, PreTrainedModel
from ...utils import logging
_lowercase = logging.get_logger(__name__)
def _A (UpperCamelCase : Dict , UpperCamelCase : Optional[int] ) ->Tuple:
'''simple docstring'''
lowerCamelCase__ : List[str] = nn.functional.normalize(UpperCamelCase )
lowerCamelCase__ : Tuple = nn.functional.normalize(UpperCamelCase )
return torch.mm(UpperCamelCase , normalized_text_embeds.t() )
class __A (A_ ):
UpperCamelCase :Optional[Any] = CLIPConfig
UpperCamelCase :Optional[Any] = ['''CLIPEncoderLayer''']
def __init__(self , __magic_name__ ):
super().__init__(__magic_name__ )
lowerCamelCase__ : Union[str, Any] = CLIPVisionModel(config.vision_config )
lowerCamelCase__ : Any = nn.Linear(config.vision_config.hidden_size , config.projection_dim , bias=__magic_name__ )
lowerCamelCase__ : Any = nn.Parameter(torch.ones(17 , config.projection_dim ) , requires_grad=__magic_name__ )
lowerCamelCase__ : Union[str, Any] = nn.Parameter(torch.ones(3 , config.projection_dim ) , requires_grad=__magic_name__ )
lowerCamelCase__ : Any = nn.Parameter(torch.ones(17 ) , requires_grad=__magic_name__ )
lowerCamelCase__ : Optional[Any] = nn.Parameter(torch.ones(3 ) , requires_grad=__magic_name__ )
@torch.no_grad()
def _snake_case (self , __magic_name__ , __magic_name__ ):
lowerCamelCase__ : Any = self.vision_model(__magic_name__ )[1] # pooled_output
lowerCamelCase__ : Tuple = self.visual_projection(__magic_name__ )
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
lowerCamelCase__ : Any = cosine_distance(__magic_name__ , self.special_care_embeds ).cpu().float().numpy()
lowerCamelCase__ : str = cosine_distance(__magic_name__ , self.concept_embeds ).cpu().float().numpy()
lowerCamelCase__ : int = []
lowerCamelCase__ : str = image_embeds.shape[0]
for i in range(__magic_name__ ):
lowerCamelCase__ : str = {"""special_scores""": {}, """special_care""": [], """concept_scores""": {}, """bad_concepts""": []}
# increase this value to create a stronger `nfsw` filter
# at the cost of increasing the possibility of filtering benign images
lowerCamelCase__ : Optional[Any] = 0.0
for concept_idx in range(len(special_cos_dist[0] ) ):
lowerCamelCase__ : List[str] = special_cos_dist[i][concept_idx]
lowerCamelCase__ : Tuple = self.special_care_embeds_weights[concept_idx].item()
lowerCamelCase__ : str = round(concept_cos - concept_threshold + adjustment , 3 )
if result_img["special_scores"][concept_idx] > 0:
result_img["special_care"].append({concept_idx, result_img["""special_scores"""][concept_idx]} )
lowerCamelCase__ : List[str] = 0.01
for concept_idx in range(len(cos_dist[0] ) ):
lowerCamelCase__ : str = cos_dist[i][concept_idx]
lowerCamelCase__ : Dict = self.concept_embeds_weights[concept_idx].item()
lowerCamelCase__ : int = round(concept_cos - concept_threshold + adjustment , 3 )
if result_img["concept_scores"][concept_idx] > 0:
result_img["bad_concepts"].append(__magic_name__ )
result.append(__magic_name__ )
lowerCamelCase__ : Tuple = [len(res["""bad_concepts"""] ) > 0 for res in result]
return images, has_nsfw_concepts
@torch.no_grad()
def _snake_case (self , __magic_name__ , __magic_name__ ):
lowerCamelCase__ : Union[str, Any] = self.vision_model(__magic_name__ )[1] # pooled_output
lowerCamelCase__ : str = self.visual_projection(__magic_name__ )
lowerCamelCase__ : Optional[Any] = cosine_distance(__magic_name__ , self.special_care_embeds )
lowerCamelCase__ : Dict = cosine_distance(__magic_name__ , self.concept_embeds )
# increase this value to create a stronger `nsfw` filter
# at the cost of increasing the possibility of filtering benign images
lowerCamelCase__ : Any = 0.0
lowerCamelCase__ : Dict = special_cos_dist - self.special_care_embeds_weights + adjustment
# special_scores = special_scores.round(decimals=3)
lowerCamelCase__ : Union[str, Any] = torch.any(special_scores > 0 , dim=1 )
lowerCamelCase__ : Optional[Any] = special_care * 0.01
lowerCamelCase__ : Dict = special_adjustment.unsqueeze(1 ).expand(-1 , cos_dist.shape[1] )
lowerCamelCase__ : Tuple = (cos_dist - self.concept_embeds_weights) + special_adjustment
# concept_scores = concept_scores.round(decimals=3)
lowerCamelCase__ : Any = torch.any(concept_scores > 0 , dim=1 )
return images, has_nsfw_concepts
| 717
|
import torch
import torch.nn as nn
from transformers import CLIPConfig, CLIPVisionModel, PreTrainedModel
from ...utils import logging
_lowercase = logging.get_logger(__name__)
def _A (UpperCamelCase : Dict , UpperCamelCase : Optional[int] ) ->Tuple:
'''simple docstring'''
lowerCamelCase__ : List[str] = nn.functional.normalize(UpperCamelCase )
lowerCamelCase__ : Tuple = nn.functional.normalize(UpperCamelCase )
return torch.mm(UpperCamelCase , normalized_text_embeds.t() )
class __A ( A_ ):
UpperCamelCase :Optional[Any] = CLIPConfig
UpperCamelCase :Optional[Any] = ['''CLIPEncoderLayer''']
def __init__(self , __magic_name__ ):
super().__init__(__magic_name__ )
lowerCamelCase__ : Union[str, Any] = CLIPVisionModel(config.vision_config )
lowerCamelCase__ : Any = nn.Linear(config.vision_config.hidden_size , config.projection_dim , bias=__magic_name__ )
lowerCamelCase__ : Any = nn.Parameter(torch.ones(17 , config.projection_dim ) , requires_grad=__magic_name__ )
lowerCamelCase__ : Union[str, Any] = nn.Parameter(torch.ones(3 , config.projection_dim ) , requires_grad=__magic_name__ )
lowerCamelCase__ : Any = nn.Parameter(torch.ones(17 ) , requires_grad=__magic_name__ )
lowerCamelCase__ : Optional[Any] = nn.Parameter(torch.ones(3 ) , requires_grad=__magic_name__ )
@torch.no_grad()
def _snake_case (self , __magic_name__ , __magic_name__ ):
lowerCamelCase__ : Any = self.vision_model(__magic_name__ )[1] # pooled_output
lowerCamelCase__ : Tuple = self.visual_projection(__magic_name__ )
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
lowerCamelCase__ : Any = cosine_distance(__magic_name__ , self.special_care_embeds ).cpu().float().numpy()
lowerCamelCase__ : str = cosine_distance(__magic_name__ , self.concept_embeds ).cpu().float().numpy()
lowerCamelCase__ : int = []
lowerCamelCase__ : str = image_embeds.shape[0]
for i in range(__magic_name__ ):
lowerCamelCase__ : str = {"""special_scores""": {}, """special_care""": [], """concept_scores""": {}, """bad_concepts""": []}
# increase this value to create a stronger `nfsw` filter
# at the cost of increasing the possibility of filtering benign images
lowerCamelCase__ : Optional[Any] = 0.0
for concept_idx in range(len(special_cos_dist[0] ) ):
lowerCamelCase__ : List[str] = special_cos_dist[i][concept_idx]
lowerCamelCase__ : Tuple = self.special_care_embeds_weights[concept_idx].item()
lowerCamelCase__ : str = round(concept_cos - concept_threshold + adjustment , 3 )
if result_img["special_scores"][concept_idx] > 0:
result_img["special_care"].append({concept_idx, result_img["""special_scores"""][concept_idx]} )
lowerCamelCase__ : List[str] = 0.01
for concept_idx in range(len(cos_dist[0] ) ):
lowerCamelCase__ : str = cos_dist[i][concept_idx]
lowerCamelCase__ : Dict = self.concept_embeds_weights[concept_idx].item()
lowerCamelCase__ : int = round(concept_cos - concept_threshold + adjustment , 3 )
if result_img["concept_scores"][concept_idx] > 0:
result_img["bad_concepts"].append(__magic_name__ )
result.append(__magic_name__ )
lowerCamelCase__ : Tuple = [len(res["""bad_concepts"""] ) > 0 for res in result]
return images, has_nsfw_concepts
@torch.no_grad()
def _snake_case (self , __magic_name__ , __magic_name__ ):
lowerCamelCase__ : Union[str, Any] = self.vision_model(__magic_name__ )[1] # pooled_output
lowerCamelCase__ : str = self.visual_projection(__magic_name__ )
lowerCamelCase__ : Optional[Any] = cosine_distance(__magic_name__ , self.special_care_embeds )
lowerCamelCase__ : Dict = cosine_distance(__magic_name__ , self.concept_embeds )
# increase this value to create a stronger `nsfw` filter
# at the cost of increasing the possibility of filtering benign images
lowerCamelCase__ : Any = 0.0
lowerCamelCase__ : Dict = special_cos_dist - self.special_care_embeds_weights + adjustment
# special_scores = special_scores.round(decimals=3)
lowerCamelCase__ : Union[str, Any] = torch.any(special_scores > 0 , dim=1 )
lowerCamelCase__ : Optional[Any] = special_care * 0.01
lowerCamelCase__ : Dict = special_adjustment.unsqueeze(1 ).expand(-1 , cos_dist.shape[1] )
lowerCamelCase__ : Tuple = (cos_dist - self.concept_embeds_weights) + special_adjustment
# concept_scores = concept_scores.round(decimals=3)
lowerCamelCase__ : Any = torch.any(concept_scores > 0 , dim=1 )
return images, has_nsfw_concepts
| 96
| 0
|
"""simple docstring"""
import gc
import unittest
from diffusers import FlaxStableDiffusionInpaintPipeline
from diffusers.utils import is_flax_available, load_image, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class UpperCamelCase_ (unittest.TestCase ):
def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Tuple:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
def _SCREAMING_SNAKE_CASE ( self : Any ) -> List[str]:
UpperCAmelCase_ : int = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/sd2-inpaint/init_image.png" )
UpperCAmelCase_ : Optional[Any] = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png" )
UpperCAmelCase_ : str = "xvjiarui/stable-diffusion-2-inpainting"
UpperCAmelCase_ , UpperCAmelCase_ : List[Any] = FlaxStableDiffusionInpaintPipeline.from_pretrained(lowerCAmelCase_ , safety_checker=lowerCAmelCase_ )
UpperCAmelCase_ : List[str] = "Face of a yellow cat, high resolution, sitting on a park bench"
UpperCAmelCase_ : int = jax.random.PRNGKey(0 )
UpperCAmelCase_ : Dict = 50
UpperCAmelCase_ : Union[str, Any] = jax.device_count()
UpperCAmelCase_ : Any = num_samples * [prompt]
UpperCAmelCase_ : int = num_samples * [init_image]
UpperCAmelCase_ : List[str] = num_samples * [mask_image]
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : List[Any] = pipeline.prepare_inputs(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
# shard inputs and rng
UpperCAmelCase_ : List[str] = replicate(lowerCAmelCase_ )
UpperCAmelCase_ : Dict = jax.random.split(lowerCAmelCase_ , jax.device_count() )
UpperCAmelCase_ : int = shard(lowerCAmelCase_ )
UpperCAmelCase_ : str = shard(lowerCAmelCase_ )
UpperCAmelCase_ : Optional[int] = shard(lowerCAmelCase_ )
UpperCAmelCase_ : Optional[int] = pipeline(
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , jit=lowerCAmelCase_ )
UpperCAmelCase_ : List[str] = output.images.reshape(lowerCAmelCase_ , 512 , 512 , 3 )
UpperCAmelCase_ : Any = images[0, 253:256, 253:256, -1]
UpperCAmelCase_ : Optional[Any] = jnp.asarray(jax.device_get(image_slice.flatten() ) )
UpperCAmelCase_ : Dict = jnp.array(
[0.3_6_1_1_3_0_7, 0.3_7_6_4_9_7_3_6, 0.3_7_5_7_4_0_8, 0.3_8_2_1_3_9_5_3, 0.3_9_2_9_5_1_6_7, 0.3_8_4_1_6_3_1, 0.4_1_5_5_4_9_7_8, 0.4_1_3_7_4_7_5, 0.4_2_1_7_0_8_4] )
print(f"""output_slice: {output_slice}""" )
assert jnp.abs(output_slice - expected_slice ).max() < 1e-2
| 95
|
def lowercase_( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
if a < 0 or b < 0:
raise ValueError("the value of both inputs must be positive" )
lowerCamelCase : List[str] = str(bin(SCREAMING_SNAKE_CASE_ ) )[2:] # remove the leading "0b"
lowerCamelCase : Any = str(bin(SCREAMING_SNAKE_CASE_ ) )[2:] # remove the leading "0b"
lowerCamelCase : Optional[int] = max(len(SCREAMING_SNAKE_CASE_ ) , len(SCREAMING_SNAKE_CASE_ ) )
return "0b" + "".join(
str(int(char_a != char_b ) )
for char_a, char_b in zip(a_binary.zfill(SCREAMING_SNAKE_CASE_ ) , b_binary.zfill(SCREAMING_SNAKE_CASE_ ) ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 340
| 0
|
'''simple docstring'''
import unittest
from datasets import load_dataset
from transformers.pipelines import pipeline
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_torch, slow
@is_pipeline_test
@require_torch
class _lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
@require_torch
def snake_case__ ( self ):
"""simple docstring"""
__A : Optional[int] = pipeline(
task='zero-shot-audio-classification' , model='hf-internal-testing/tiny-clap-htsat-unfused' )
__A : str = load_dataset('ashraq/esc50' )
__A : Union[str, Any] = dataset['train']['audio'][-1]['array']
__A : List[str] = audio_classifier(__lowercase , candidate_labels=['Sound of a dog', 'Sound of vaccum cleaner'] )
self.assertEqual(
nested_simplify(__lowercase ) , [{'score': 0.5_0_1, 'label': 'Sound of a dog'}, {'score': 0.4_9_9, 'label': 'Sound of vaccum cleaner'}] , )
@unittest.skip('No models are available in TF' )
def snake_case__ ( self ):
"""simple docstring"""
pass
@slow
@require_torch
def snake_case__ ( self ):
"""simple docstring"""
__A : Dict = pipeline(
task='zero-shot-audio-classification' , model='laion/clap-htsat-unfused' , )
# This is an audio of a dog
__A : Optional[Any] = load_dataset('ashraq/esc50' )
__A : List[str] = dataset['train']['audio'][-1]['array']
__A : Union[str, Any] = audio_classifier(__lowercase , candidate_labels=['Sound of a dog', 'Sound of vaccum cleaner'] )
self.assertEqual(
nested_simplify(__lowercase ) , [
{'score': 0.9_9_9, 'label': 'Sound of a dog'},
{'score': 0.0_0_1, 'label': 'Sound of vaccum cleaner'},
] , )
__A : Any = audio_classifier([audio] * 5 , candidate_labels=['Sound of a dog', 'Sound of vaccum cleaner'] )
self.assertEqual(
nested_simplify(__lowercase ) , [
[
{'score': 0.9_9_9, 'label': 'Sound of a dog'},
{'score': 0.0_0_1, 'label': 'Sound of vaccum cleaner'},
],
]
* 5 , )
__A : str = audio_classifier(
[audio] * 5 , candidate_labels=['Sound of a dog', 'Sound of vaccum cleaner'] , batch_size=5 )
self.assertEqual(
nested_simplify(__lowercase ) , [
[
{'score': 0.9_9_9, 'label': 'Sound of a dog'},
{'score': 0.0_0_1, 'label': 'Sound of vaccum cleaner'},
],
]
* 5 , )
@unittest.skip('No models are available in TF' )
def snake_case__ ( self ):
"""simple docstring"""
pass
| 540
|
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from diffusers import (
AudioDiffusionPipeline,
AutoencoderKL,
DDIMScheduler,
DDPMScheduler,
DiffusionPipeline,
Mel,
UNetaDConditionModel,
UNetaDModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class _lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
def snake_case__ ( self ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def snake_case__ ( self ):
"""simple docstring"""
torch.manual_seed(0 )
__A : Optional[int] = UNetaDModel(
sample_size=(32, 64) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(128, 128) , down_block_types=('AttnDownBlock2D', 'DownBlock2D') , up_block_types=('UpBlock2D', 'AttnUpBlock2D') , )
return model
@property
def snake_case__ ( self ):
"""simple docstring"""
torch.manual_seed(0 )
__A : Dict = UNetaDConditionModel(
sample_size=(64, 32) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(128, 128) , down_block_types=('CrossAttnDownBlock2D', 'DownBlock2D') , up_block_types=('UpBlock2D', 'CrossAttnUpBlock2D') , cross_attention_dim=10 , )
return model
@property
def snake_case__ ( self ):
"""simple docstring"""
torch.manual_seed(0 )
__A : Union[str, Any] = AutoencoderKL(
sample_size=(128, 64) , in_channels=1 , out_channels=1 , latent_channels=1 , layers_per_block=2 , block_out_channels=(128, 128) , down_block_types=('DownEncoderBlock2D', 'DownEncoderBlock2D') , up_block_types=('UpDecoderBlock2D', 'UpDecoderBlock2D') , )
__A : Union[str, Any] = UNetaDModel(
sample_size=(64, 32) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(128, 128) , down_block_types=('AttnDownBlock2D', 'DownBlock2D') , up_block_types=('UpBlock2D', 'AttnUpBlock2D') , )
return vqvae, unet
@slow
def snake_case__ ( self ):
"""simple docstring"""
__A : str = 'cpu' # ensure determinism for the device-dependent torch.Generator
__A : int = Mel(
x_res=self.dummy_unet.config.sample_size[1] , y_res=self.dummy_unet.config.sample_size[0] , )
__A : Tuple = DDPMScheduler()
__A : Optional[int] = AudioDiffusionPipeline(vqvae=__lowercase , unet=self.dummy_unet , mel=__lowercase , scheduler=__lowercase )
__A : Dict = pipe.to(__lowercase )
pipe.set_progress_bar_config(disable=__lowercase )
__A : Tuple = torch.Generator(device=__lowercase ).manual_seed(42 )
__A : Tuple = pipe(generator=__lowercase , steps=4 )
__A : Union[str, Any] = output.audios[0]
__A : Dict = output.images[0]
__A : Optional[Any] = torch.Generator(device=__lowercase ).manual_seed(42 )
__A : Optional[int] = pipe(generator=__lowercase , steps=4 , return_dict=__lowercase )
__A : int = output[0][0]
assert audio.shape == (1, (self.dummy_unet.config.sample_size[1] - 1) * mel.hop_length)
assert (
image.height == self.dummy_unet.config.sample_size[0]
and image.width == self.dummy_unet.config.sample_size[1]
)
__A : Optional[int] = np.frombuffer(image.tobytes() , dtype='uint8' )[:10]
__A : List[Any] = np.frombuffer(image_from_tuple.tobytes() , dtype='uint8' )[:10]
__A : Optional[Any] = np.array([69, 255, 255, 255, 0, 0, 77, 181, 12, 127] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() == 0
__A : Optional[int] = Mel(
x_res=self.dummy_vqvae_and_unet[0].config.sample_size[1] , y_res=self.dummy_vqvae_and_unet[0].config.sample_size[0] , )
__A : Optional[Any] = DDIMScheduler()
__A : List[Any] = self.dummy_vqvae_and_unet
__A : int = AudioDiffusionPipeline(
vqvae=self.dummy_vqvae_and_unet[0] , unet=dummy_vqvae_and_unet[1] , mel=__lowercase , scheduler=__lowercase )
__A : Dict = pipe.to(__lowercase )
pipe.set_progress_bar_config(disable=__lowercase )
np.random.seed(0 )
__A : Dict = np.random.uniform(-1 , 1 , ((dummy_vqvae_and_unet[0].config.sample_size[1] - 1) * mel.hop_length,) )
__A : Dict = torch.Generator(device=__lowercase ).manual_seed(42 )
__A : List[Any] = pipe(raw_audio=__lowercase , generator=__lowercase , start_step=5 , steps=10 )
__A : Tuple = output.images[0]
assert (
image.height == self.dummy_vqvae_and_unet[0].config.sample_size[0]
and image.width == self.dummy_vqvae_and_unet[0].config.sample_size[1]
)
__A : int = np.frombuffer(image.tobytes() , dtype='uint8' )[:10]
__A : List[str] = np.array([120, 117, 110, 109, 138, 167, 138, 148, 132, 121] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
__A : Any = self.dummy_unet_condition
__A : Optional[int] = AudioDiffusionPipeline(
vqvae=self.dummy_vqvae_and_unet[0] , unet=__lowercase , mel=__lowercase , scheduler=__lowercase )
__A : Union[str, Any] = pipe.to(__lowercase )
pipe.set_progress_bar_config(disable=__lowercase )
np.random.seed(0 )
__A : Dict = torch.rand((1, 1, 10) )
__A : Any = pipe(generator=__lowercase , encoding=__lowercase )
__A : Union[str, Any] = output.images[0]
__A : List[Any] = np.frombuffer(image.tobytes() , dtype='uint8' )[:10]
__A : int = np.array([107, 103, 120, 127, 142, 122, 113, 122, 97, 111] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
@slow
@require_torch_gpu
class _lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
def snake_case__ ( self ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def snake_case__ ( self ):
"""simple docstring"""
__A : Union[str, Any] = torch_device
__A : Union[str, Any] = DiffusionPipeline.from_pretrained('teticio/audio-diffusion-ddim-256' )
__A : Dict = pipe.to(__lowercase )
pipe.set_progress_bar_config(disable=__lowercase )
__A : Any = torch.Generator(device=__lowercase ).manual_seed(42 )
__A : Optional[Any] = pipe(generator=__lowercase )
__A : Union[str, Any] = output.audios[0]
__A : List[Any] = output.images[0]
assert audio.shape == (1, (pipe.unet.config.sample_size[1] - 1) * pipe.mel.hop_length)
assert image.height == pipe.unet.config.sample_size[0] and image.width == pipe.unet.config.sample_size[1]
__A : Union[str, Any] = np.frombuffer(image.tobytes() , dtype='uint8' )[:10]
__A : Optional[Any] = np.array([151, 167, 154, 144, 122, 134, 121, 105, 70, 26] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
| 540
| 1
|
'''simple docstring'''
from torch import nn
class __lowerCAmelCase ( nn.Module ):
def __init__(self , lowerCAmelCase__ , lowerCAmelCase__ ):
super().__init__()
_UpperCAmelCase : Union[str, Any] = class_size
_UpperCAmelCase : Dict = embed_size
# self.mlp1 = nn.Linear(embed_size, embed_size)
# self.mlp2 = (nn.Linear(embed_size, class_size))
_UpperCAmelCase : Any = nn.Linear(lowerCAmelCase__ , lowerCAmelCase__ )
def snake_case_ (self , lowerCAmelCase__ ):
# hidden_state = nn.functional.relu(self.mlp1(hidden_state))
# hidden_state = self.mlp2(hidden_state)
_UpperCAmelCase : Tuple = self.mlp(lowerCAmelCase__ )
return logits
| 414
|
'''simple docstring'''
from __future__ import annotations
from random import random
class __lowerCAmelCase :
def __init__(self , lowerCAmelCase__ = None ):
_UpperCAmelCase : List[Any] = value
_UpperCAmelCase : Optional[int] = random()
_UpperCAmelCase : Node | None = None
_UpperCAmelCase : Node | None = None
def __repr__(self ):
from pprint import pformat
if self.left is None and self.right is None:
return F"'{self.value}: {self.prior:.5}'"
else:
return pformat(
{F"{self.value}: {self.prior:.5}": (self.left, self.right)} , indent=1 )
def __str__(self ):
_UpperCAmelCase : List[Any] = str(self.value ) + """ """
_UpperCAmelCase : str = str(self.left or """""" )
_UpperCAmelCase : Dict = str(self.right or """""" )
return value + left + right
def __A ( lowerCAmelCase_ , lowerCAmelCase_ ):
if root is None: # None tree is split into 2 Nones
return None, None
elif root.value is None:
return None, None
else:
if value < root.value:
_UpperCAmelCase , _UpperCAmelCase : Dict = split(root.left , lowerCAmelCase_ )
return left, root
else:
_UpperCAmelCase , _UpperCAmelCase : List[str] = split(root.right , lowerCAmelCase_ )
return root, right
def __A ( lowerCAmelCase_ , lowerCAmelCase_ ):
if (not left) or (not right): # If one node is None, return the other
return left or right
elif left.prior < right.prior:
_UpperCAmelCase : str = merge(left.right , lowerCAmelCase_ )
return left
else:
_UpperCAmelCase : List[str] = merge(lowerCAmelCase_ , right.left )
return right
def __A ( lowerCAmelCase_ , lowerCAmelCase_ ):
_UpperCAmelCase : List[str] = Node(lowerCAmelCase_ )
_UpperCAmelCase , _UpperCAmelCase : str = split(lowerCAmelCase_ , lowerCAmelCase_ )
return merge(merge(lowerCAmelCase_ , lowerCAmelCase_ ) , lowerCAmelCase_ )
def __A ( lowerCAmelCase_ , lowerCAmelCase_ ):
_UpperCAmelCase , _UpperCAmelCase : int = split(lowerCAmelCase_ , value - 1 )
_UpperCAmelCase , _UpperCAmelCase : Dict = split(lowerCAmelCase_ , lowerCAmelCase_ )
return merge(lowerCAmelCase_ , lowerCAmelCase_ )
def __A ( lowerCAmelCase_ ):
if not root: # None
return
else:
inorder(root.left )
print(root.value , end=""",""" )
inorder(root.right )
def __A ( lowerCAmelCase_ , lowerCAmelCase_ ):
for arg in args.split():
if arg[0] == "+":
_UpperCAmelCase : List[str] = insert(lowerCAmelCase_ , int(arg[1:] ) )
elif arg[0] == "-":
_UpperCAmelCase : Tuple = erase(lowerCAmelCase_ , int(arg[1:] ) )
else:
print("""Unknown command""" )
return root
def __A ( ):
_UpperCAmelCase : Union[str, Any] = None
print(
"""enter numbers to create a tree, + value to add value into treap, """
"""- value to erase all nodes with value. 'q' to quit. """ )
_UpperCAmelCase : Tuple = input()
while args != "q":
_UpperCAmelCase : Union[str, Any] = interact_treap(lowerCAmelCase_ , lowerCAmelCase_ )
print(lowerCAmelCase_ )
_UpperCAmelCase : Optional[int] = input()
print("""good by!""" )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 414
| 1
|
a__: int = '\n# Installazione di Transformers\n! pip install transformers datasets\n# Per installare dalla fonte invece dell\'ultima versione rilasciata, commenta il comando sopra e\n# rimuovi la modalità commento al comando seguente.\n# ! pip install git+https://github.com/huggingface/transformers.git\n'
a__: Dict = [{'type': 'code', 'content': INSTALL_CONTENT}]
a__: Any = {
'{processor_class}': 'FakeProcessorClass',
'{model_class}': 'FakeModelClass',
'{object_class}': 'FakeObjectClass',
}
| 718
|
import warnings
from ...utils import logging
from .image_processing_dpt import DPTImageProcessor
a__: Optional[int] = logging.get_logger(__name__)
class SCREAMING_SNAKE_CASE__ ( UpperCamelCase__ ):
def __init__( self,*__lowerCamelCase,**__lowerCamelCase ):
warnings.warn(
'''The class DPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'''
''' use DPTImageProcessor instead.''',__lowerCamelCase,)
super().__init__(*__lowerCamelCase,**__lowerCamelCase )
| 212
| 0
|
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers import is_speech_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_speech_available():
from transformers import WhisperFeatureExtractor
if is_torch_available():
import torch
__A : Union[str, Any] = random.Random()
def __lowerCAmelCase( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=1.0 , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None ) -> Optional[Any]:
"""simple docstring"""
if rng is None:
_A = global_rng
_A = []
for batch_idx in range(shape[0] ):
values.append([] )
for _ in range(shape[1] ):
values[-1].append(rng.random() * scale )
return values
@require_torch
@require_torchaudio
class lowerCamelCase( unittest.TestCase ):
'''simple docstring'''
def __init__( self , snake_case_ , snake_case_=7 , snake_case_=400 , snake_case_=2000 , snake_case_=10 , snake_case_=160 , snake_case_=8 , snake_case_=0.0 , snake_case_=4000 , snake_case_=False , snake_case_=True , ):
_A = parent
_A = batch_size
_A = min_seq_length
_A = max_seq_length
_A = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
_A = padding_value
_A = sampling_rate
_A = return_attention_mask
_A = do_normalize
_A = feature_size
_A = chunk_length
_A = hop_length
def lowerCAmelCase__ ( self ):
return {
"feature_size": self.feature_size,
"hop_length": self.hop_length,
"chunk_length": self.chunk_length,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
def lowerCAmelCase__ ( self , snake_case_=False , snake_case_=False ):
def _flatten(snake_case_ ):
return list(itertools.chain(*snake_case_ ) )
if equal_length:
_A = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )]
else:
# make sure that inputs increase in size
_A = [
floats_list((x, self.feature_size) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
_A = [np.asarray(snake_case_ ) for x in speech_inputs]
return speech_inputs
@require_torch
@require_torchaudio
class lowerCamelCase( __snake_case , unittest.TestCase ):
'''simple docstring'''
__magic_name__ = WhisperFeatureExtractor if is_speech_available() else None
def lowerCAmelCase__ ( self ):
_A = WhisperFeatureExtractionTester(self )
def lowerCAmelCase__ ( self ):
_A = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
_A = feat_extract_first.save_pretrained(snake_case_ )[0]
check_json_file_has_correct_format(snake_case_ )
_A = self.feature_extraction_class.from_pretrained(snake_case_ )
_A = feat_extract_first.to_dict()
_A = feat_extract_second.to_dict()
_A = feat_extract_first.mel_filters
_A = feat_extract_second.mel_filters
self.assertTrue(np.allclose(snake_case_ , snake_case_ ) )
self.assertEqual(snake_case_ , snake_case_ )
def lowerCAmelCase__ ( self ):
_A = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
_A = os.path.join(snake_case_ , 'feat_extract.json' )
feat_extract_first.to_json_file(snake_case_ )
_A = self.feature_extraction_class.from_json_file(snake_case_ )
_A = feat_extract_first.to_dict()
_A = feat_extract_second.to_dict()
_A = feat_extract_first.mel_filters
_A = feat_extract_second.mel_filters
self.assertTrue(np.allclose(snake_case_ , snake_case_ ) )
self.assertEqual(snake_case_ , snake_case_ )
def lowerCAmelCase__ ( self ):
# Tests that all call wrap to encode_plus and batch_encode_plus
_A = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
_A = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
_A = [np.asarray(snake_case_ ) for speech_input in speech_inputs]
# Test feature size
_A = feature_extractor(snake_case_ , padding='max_length' , return_tensors='np' ).input_features
self.assertTrue(input_features.ndim == 3 )
self.assertTrue(input_features.shape[-1] == feature_extractor.nb_max_frames )
self.assertTrue(input_features.shape[-2] == feature_extractor.feature_size )
# Test not batched input
_A = feature_extractor(speech_inputs[0] , return_tensors='np' ).input_features
_A = feature_extractor(np_speech_inputs[0] , return_tensors='np' ).input_features
self.assertTrue(np.allclose(snake_case_ , snake_case_ , atol=1E-3 ) )
# Test batched
_A = feature_extractor(snake_case_ , return_tensors='np' ).input_features
_A = feature_extractor(snake_case_ , return_tensors='np' ).input_features
for enc_seq_a, enc_seq_a in zip(snake_case_ , snake_case_ ):
self.assertTrue(np.allclose(snake_case_ , snake_case_ , atol=1E-3 ) )
# Test 2-D numpy arrays are batched.
_A = [floats_list((1, x) )[0] for x in (800, 800, 800)]
_A = np.asarray(snake_case_ )
_A = feature_extractor(snake_case_ , return_tensors='np' ).input_features
_A = feature_extractor(snake_case_ , return_tensors='np' ).input_features
for enc_seq_a, enc_seq_a in zip(snake_case_ , snake_case_ ):
self.assertTrue(np.allclose(snake_case_ , snake_case_ , atol=1E-3 ) )
# Test truncation required
_A = [floats_list((1, x) )[0] for x in range(200 , (feature_extractor.n_samples + 500) , 200 )]
_A = [np.asarray(snake_case_ ) for speech_input in speech_inputs]
_A = [x[: feature_extractor.n_samples] for x in speech_inputs]
_A = [np.asarray(snake_case_ ) for speech_input in speech_inputs_truncated]
_A = feature_extractor(snake_case_ , return_tensors='np' ).input_features
_A = feature_extractor(snake_case_ , return_tensors='np' ).input_features
for enc_seq_a, enc_seq_a in zip(snake_case_ , snake_case_ ):
self.assertTrue(np.allclose(snake_case_ , snake_case_ , atol=1E-3 ) )
def lowerCAmelCase__ ( self ):
import torch
_A = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
_A = np.random.rand(100 , 32 ).astype(np.floataa )
_A = np_speech_inputs.tolist()
for inputs in [py_speech_inputs, np_speech_inputs]:
_A = feature_extractor.pad([{'input_features': inputs}] , return_tensors='np' )
self.assertTrue(np_processed.input_features.dtype == np.floataa )
_A = feature_extractor.pad([{'input_features': inputs}] , return_tensors='pt' )
self.assertTrue(pt_processed.input_features.dtype == torch.floataa )
def lowerCAmelCase__ ( self , snake_case_ ):
_A = load_dataset('hf-internal-testing/librispeech_asr_dummy' , 'clean' , split='validation' )
# automatic decoding with librispeech
_A = ds.sort('id' ).select(range(snake_case_ ) )[:num_samples]['audio']
return [x["array"] for x in speech_samples]
def lowerCAmelCase__ ( self ):
# fmt: off
_A = torch.tensor(
[
0.1193, -0.0946, -0.1098, -0.0196, 0.0225, -0.0690, -0.1736, 0.0951,
0.0971, -0.0817, -0.0702, 0.0162, 0.0260, 0.0017, -0.0192, -0.1678,
0.0709, -0.1867, -0.0655, -0.0274, -0.0234, -0.1884, -0.0516, -0.0554,
-0.0274, -0.1425, -0.1423, 0.0837, 0.0377, -0.0854
] )
# fmt: on
_A = self._load_datasamples(1 )
_A = WhisperFeatureExtractor()
_A = feature_extractor(snake_case_ , return_tensors='pt' ).input_features
self.assertEqual(input_features.shape , (1, 80, 3000) )
self.assertTrue(torch.allclose(input_features[0, 0, :30] , snake_case_ , atol=1E-4 ) )
def lowerCAmelCase__ ( self ):
_A = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
_A = self._load_datasamples(1 )[0]
_A = ((audio - audio.min()) / (audio.max() - audio.min())) * 6_5535 # Rescale to [0, 65535] to show issue
_A = feat_extract.zero_mean_unit_var_norm([audio] , attention_mask=snake_case_ )[0]
self.assertTrue(np.all(np.mean(snake_case_ ) < 1E-3 ) )
self.assertTrue(np.all(np.abs(np.var(snake_case_ ) - 1 ) < 1E-3 ) )
| 27
|
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import subprocess
from packaging.version import Version, parse
from accelerate.commands.config.config_args import default_config_file, load_config_from_file
__A : Optional[int] = "Run commands across TPU VMs for initial setup before running `accelerate launch`."
def __lowerCAmelCase( _SCREAMING_SNAKE_CASE=None ) -> str:
"""simple docstring"""
if subparsers is not None:
_A = subparsers.add_parser('tpu-config' , description=_description )
else:
_A = argparse.ArgumentParser('Accelerate tpu-config command' , description=_description )
# Core arguments
_A = parser.add_argument_group(
'Config Arguments' , 'Arguments that can be configured through `accelerate config`.' )
config_args.add_argument(
'--config_file' , type=_SCREAMING_SNAKE_CASE , default=_SCREAMING_SNAKE_CASE , help='Path to the config file to use for accelerate.' , )
config_args.add_argument(
'--tpu_name' , default=_SCREAMING_SNAKE_CASE , help='The name of the TPU to use. If not specified, will use the TPU specified in the config file.' , )
config_args.add_argument(
'--tpu_zone' , default=_SCREAMING_SNAKE_CASE , help='The zone of the TPU to use. If not specified, will use the zone specified in the config file.' , )
_A = parser.add_argument_group('TPU Arguments' , 'Arguments for options ran inside the TPU.' )
pod_args.add_argument(
'--use_alpha' , action='store_true' , help='Whether to use `gcloud alpha` when running the TPU training script instead of `gcloud`.' , )
pod_args.add_argument(
'--command_file' , default=_SCREAMING_SNAKE_CASE , help='The path to the file containing the commands to run on the pod on startup.' , )
pod_args.add_argument(
'--command' , action='append' , nargs='+' , help='A command to run on the pod. Can be passed multiple times.' , )
pod_args.add_argument(
'--install_accelerate' , action='store_true' , help='Whether to install accelerate on the pod. Defaults to False.' , )
pod_args.add_argument(
'--accelerate_version' , default='latest' , help='The version of accelerate to install on the pod. If not specified, will use the latest pypi version. Specify \'dev\' to install from GitHub.' , )
pod_args.add_argument(
'--debug' , action='store_true' , help='If set, will print the command that would be run instead of running it.' )
if subparsers is not None:
parser.set_defaults(func=_SCREAMING_SNAKE_CASE )
return parser
def __lowerCAmelCase( _SCREAMING_SNAKE_CASE ) -> int:
"""simple docstring"""
_A = None
# Get the default from the config file if it exists.
if args.config_file is not None or os.path.isfile(_SCREAMING_SNAKE_CASE ):
_A = load_config_from_file(args.config_file )
if not args.command_file and defaults.command_file is not None and not args.command:
_A = defaults.command_file
if not args.command and defaults.commands is not None:
_A = defaults.commands
if not args.tpu_name:
_A = defaults.tpu_name
if not args.tpu_zone:
_A = defaults.tpu_zone
if args.accelerate_version == "dev":
_A = 'git+https://github.com/huggingface/accelerate.git'
elif args.accelerate_version == "latest":
_A = 'accelerate -U'
elif isinstance(parse(args.accelerate_version ) , _SCREAMING_SNAKE_CASE ):
_A = F"accelerate=={args.accelerate_version}"
if not args.command_file and not args.command:
raise ValueError('You must specify either a command file or a command to run on the pod.' )
if args.command_file:
with open(args.command_file , 'r' ) as f:
_A = [f.read().splitlines()]
# To turn list of lists into list of strings
if isinstance(args.command[0] , _SCREAMING_SNAKE_CASE ):
_A = [line for cmd in args.command for line in cmd]
# Default to the shared folder and install accelerate
_A = ['cd /usr/share']
if args.install_accelerate:
new_cmd += [F"pip install {args.accelerate_version}"]
new_cmd += args.command
_A = '; '.join(_SCREAMING_SNAKE_CASE )
# Then send it to gcloud
# Eventually try to use google-api-core to do this instead of subprocess
_A = ['gcloud']
if args.use_alpha:
cmd += ["alpha"]
cmd += [
"compute",
"tpus",
"tpu-vm",
"ssh",
args.tpu_name,
"--zone",
args.tpu_zone,
"--command",
args.command,
"--worker",
"all",
]
if args.debug:
print(F"Running {' '.join(_SCREAMING_SNAKE_CASE )}" )
return
subprocess.run(_SCREAMING_SNAKE_CASE )
print('Successfully setup pod.' )
def __lowerCAmelCase( ) -> Tuple:
"""simple docstring"""
_A = tpu_command_parser()
_A = parser.parse_args()
tpu_command_launcher(_SCREAMING_SNAKE_CASE )
| 27
| 1
|
'''simple docstring'''
import unittest
from transformers import BertGenerationTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
UpperCamelCase__ : Any = '''▁'''
UpperCamelCase__ : Dict = get_tests_dir('''fixtures/test_sentencepiece.model''')
@require_sentencepiece
class _UpperCamelCase ( lowerCamelCase__ , unittest.TestCase ):
'''simple docstring'''
_A : Dict = BertGenerationTokenizer
_A : List[Any] = False
_A : Union[str, Any] = True
def UpperCamelCase__ ( self : Optional[Any] ):
"""simple docstring"""
super().setUp()
__SCREAMING_SNAKE_CASE : Dict = BertGenerationTokenizer(lowerCAmelCase__ , keep_accents=lowerCAmelCase__ )
tokenizer.save_pretrained(self.tmpdirname )
def UpperCamelCase__ ( self : Optional[Any] ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : int = """<s>"""
__SCREAMING_SNAKE_CASE : Optional[Any] = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowerCAmelCase__ ) , lowerCAmelCase__ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowerCAmelCase__ ) , lowerCAmelCase__ )
def UpperCamelCase__ ( self : Union[str, Any] ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Dict = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , """<unk>""" )
self.assertEqual(vocab_keys[1] , """<s>""" )
self.assertEqual(vocab_keys[-1] , """<pad>""" )
self.assertEqual(len(lowerCAmelCase__ ) , 1_0_0_2 )
def UpperCamelCase__ ( self : Optional[int] ):
"""simple docstring"""
self.assertEqual(self.get_tokenizer().vocab_size , 1_0_0_0 )
def UpperCamelCase__ ( self : Optional[Any] ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Union[str, Any] = BertGenerationTokenizer(lowerCAmelCase__ , keep_accents=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Any = tokenizer.tokenize("""This is a test""" )
self.assertListEqual(lowerCAmelCase__ , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(lowerCAmelCase__ ) , [2_8_5, 4_6, 1_0, 1_7_0, 3_8_2] , )
__SCREAMING_SNAKE_CASE : Optional[Any] = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
self.assertListEqual(
lowerCAmelCase__ , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""é""",
""".""",
] , )
__SCREAMING_SNAKE_CASE : Any = tokenizer.convert_tokens_to_ids(lowerCAmelCase__ )
self.assertListEqual(
lowerCAmelCase__ , [8, 2_1, 8_4, 5_5, 2_4, 1_9, 7, 0, 6_0_2, 3_4_7, 3_4_7, 3_4_7, 3, 1_2, 6_6, 4_6, 7_2, 8_0, 6, 0, 4] , )
__SCREAMING_SNAKE_CASE : Any = tokenizer.convert_ids_to_tokens(lowerCAmelCase__ )
self.assertListEqual(
lowerCAmelCase__ , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""<unk>""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""<unk>""",
""".""",
] , )
@cached_property
def UpperCamelCase__ ( self : List[str] ):
"""simple docstring"""
return BertGenerationTokenizer.from_pretrained("""google/bert_for_seq_generation_L-24_bbc_encoder""" )
@slow
def UpperCamelCase__ ( self : Tuple ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Any = """Hello World!"""
__SCREAMING_SNAKE_CASE : Dict = [1_8_5_3_6, 2_2_6_0, 1_0_1]
self.assertListEqual(lowerCAmelCase__ , self.big_tokenizer.encode(lowerCAmelCase__ ) )
@slow
def UpperCamelCase__ ( self : Dict ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : List[Any] = (
"""This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will"""
""" add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth"""
)
__SCREAMING_SNAKE_CASE : Optional[int] = [
8_7_1,
4_1_9,
3_5_8,
9_4_6,
9_9_1,
2_5_2_1,
4_5_2,
3_5_8,
1_3_5_7,
3_8_7,
7_7_5_1,
3_5_3_6,
1_1_2,
9_8_5,
4_5_6,
1_2_6,
8_6_5,
9_3_8,
5_4_0_0,
5_7_3_4,
4_5_8,
1_3_6_8,
4_6_7,
7_8_6,
2_4_6_2,
5_2_4_6,
1_1_5_9,
6_3_3,
8_6_5,
4_5_1_9,
4_5_7,
5_8_2,
8_5_2,
2_5_5_7,
4_2_7,
9_1_6,
5_0_8,
4_0_5,
3_4_3_2_4,
4_9_7,
3_9_1,
4_0_8,
1_1_3_4_2,
1_2_4_4,
3_8_5,
1_0_0,
9_3_8,
9_8_5,
4_5_6,
5_7_4,
3_6_2,
1_2_5_9_7,
3_2_0_0,
3_1_2_9,
1_1_7_2,
]
self.assertListEqual(lowerCAmelCase__ , self.big_tokenizer.encode(lowerCAmelCase__ ) )
@require_torch
@slow
def UpperCamelCase__ ( self : str ):
"""simple docstring"""
import torch
from transformers import BertGenerationConfig, BertGenerationEncoder
# Build sequence
__SCREAMING_SNAKE_CASE : List[Any] = list(self.big_tokenizer.get_vocab().keys() )[:1_0]
__SCREAMING_SNAKE_CASE : int = """ """.join(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Union[str, Any] = self.big_tokenizer.encode_plus(lowerCAmelCase__ , return_tensors="""pt""" , return_token_type_ids=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Optional[int] = self.big_tokenizer.batch_encode_plus(
[sequence + """ """ + sequence] , return_tensors="""pt""" , return_token_type_ids=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Tuple = BertGenerationConfig()
__SCREAMING_SNAKE_CASE : Any = BertGenerationEncoder(lowerCAmelCase__ )
assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size
with torch.no_grad():
model(**lowerCAmelCase__ )
model(**lowerCAmelCase__ )
@slow
def UpperCamelCase__ ( self : List[str] ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Optional[Any] = {"""input_ids""": [[3_9_2_8_6, 4_5_8, 3_6_3_3_5, 2_0_0_1, 4_5_6, 1_3_0_7_3, 1_3_2_6_6, 4_5_5, 1_1_3, 7_7_4_6, 1_7_4_1, 1_1_1_5_7, 3_9_1, 1_3_0_7_3, 1_3_2_6_6, 4_5_5, 1_1_3, 3_9_6_7, 3_5_4_1_2, 1_1_3, 4_9_3_6, 1_0_9, 3_8_7_0, 2_3_7_7, 1_1_3, 3_0_0_8_4, 4_5_7_2_0, 4_5_8, 1_3_4, 1_7_4_9_6, 1_1_2, 5_0_3, 1_1_6_7_2, 1_1_3, 1_1_8, 1_1_2, 5_6_6_5, 1_3_3_4_7, 3_8_6_8_7, 1_1_2, 1_4_9_6, 3_1_3_8_9, 1_1_2, 3_2_6_8, 4_7_2_6_4, 1_3_4, 9_6_2, 1_1_2, 1_6_3_7_7, 8_0_3_5, 2_3_1_3_0, 4_3_0, 1_2_1_6_9, 1_5_5_1_8, 2_8_5_9_2, 4_5_8, 1_4_6, 4_1_6_9_7, 1_0_9, 3_9_1, 1_2_1_6_9, 1_5_5_1_8, 1_6_6_8_9, 4_5_8, 1_4_6, 4_1_3_5_8, 1_0_9, 4_5_2, 7_2_6, 4_0_3_4, 1_1_1, 7_6_3, 3_5_4_1_2, 5_0_8_2, 3_8_8, 1_9_0_3, 1_1_1, 9_0_5_1, 3_9_1, 2_8_7_0, 4_8_9_1_8, 1_9_0_0, 1_1_2_3, 5_5_0, 9_9_8, 1_1_2, 9_5_8_6, 1_5_9_8_5, 4_5_5, 3_9_1, 4_1_0, 2_2_9_5_5, 3_7_6_3_6, 1_1_4], [4_4_8, 1_7_4_9_6, 4_1_9, 3_6_6_3, 3_8_5, 7_6_3, 1_1_3, 2_7_5_3_3, 2_8_7_0, 3_2_8_3, 1_3_0_4_3, 1_6_3_9, 2_4_7_1_3, 5_2_3, 6_5_6, 2_4_0_1_3, 1_8_5_5_0, 2_5_2_1, 5_1_7, 2_7_0_1_4, 2_1_2_4_4, 4_2_0, 1_2_1_2, 1_4_6_5, 3_9_1, 9_2_7, 4_8_3_3, 3_8_8, 5_7_8, 1_1_7_8_6, 1_1_4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [4_8_4, 2_1_6_9, 7_6_8_7, 2_1_9_3_2, 1_8_1_4_6, 7_2_6, 3_6_3, 1_7_0_3_2, 3_3_9_1, 1_1_4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=lowerCAmelCase__ , model_name="""google/bert_for_seq_generation_L-24_bbc_encoder""" , revision="""c817d1fd1be2ffa69431227a1fe320544943d4db""" , )
| 178
|
'''simple docstring'''
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxSeqaSeqConfigWithPast
from ...utils import logging
UpperCamelCase__ : Tuple = logging.get_logger(__name__)
UpperCamelCase__ : List[str] = {
'''t5-small''': '''https://huggingface.co/t5-small/resolve/main/config.json''',
'''t5-base''': '''https://huggingface.co/t5-base/resolve/main/config.json''',
'''t5-large''': '''https://huggingface.co/t5-large/resolve/main/config.json''',
'''t5-3b''': '''https://huggingface.co/t5-3b/resolve/main/config.json''',
'''t5-11b''': '''https://huggingface.co/t5-11b/resolve/main/config.json''',
}
class _UpperCamelCase ( lowerCamelCase__ ):
'''simple docstring'''
_A : Union[str, Any] = '''t5'''
_A : Optional[Any] = ['''past_key_values''']
_A : Any = {'''hidden_size''': '''d_model''', '''num_attention_heads''': '''num_heads''', '''num_hidden_layers''': '''num_layers'''}
def __init__( self : Union[str, Any] , lowerCAmelCase__ : List[str]=3_2_1_2_8 , lowerCAmelCase__ : Dict=5_1_2 , lowerCAmelCase__ : str=6_4 , lowerCAmelCase__ : Tuple=2_0_4_8 , lowerCAmelCase__ : Optional[Any]=6 , lowerCAmelCase__ : Any=None , lowerCAmelCase__ : Any=8 , lowerCAmelCase__ : Dict=3_2 , lowerCAmelCase__ : Dict=1_2_8 , lowerCAmelCase__ : Any=0.1 , lowerCAmelCase__ : str=1E-6 , lowerCAmelCase__ : str=1.0 , lowerCAmelCase__ : Optional[Any]="relu" , lowerCAmelCase__ : List[str]=True , lowerCAmelCase__ : Union[str, Any]=True , lowerCAmelCase__ : Optional[Any]=0 , lowerCAmelCase__ : List[Any]=1 , **lowerCAmelCase__ : Dict , ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : List[str] = vocab_size
__SCREAMING_SNAKE_CASE : str = d_model
__SCREAMING_SNAKE_CASE : str = d_kv
__SCREAMING_SNAKE_CASE : Optional[Any] = d_ff
__SCREAMING_SNAKE_CASE : Optional[Any] = num_layers
__SCREAMING_SNAKE_CASE : List[str] = (
num_decoder_layers if num_decoder_layers is not None else self.num_layers
) # default = symmetry
__SCREAMING_SNAKE_CASE : Optional[int] = num_heads
__SCREAMING_SNAKE_CASE : Any = relative_attention_num_buckets
__SCREAMING_SNAKE_CASE : int = relative_attention_max_distance
__SCREAMING_SNAKE_CASE : Optional[int] = dropout_rate
__SCREAMING_SNAKE_CASE : Optional[int] = layer_norm_epsilon
__SCREAMING_SNAKE_CASE : Optional[int] = initializer_factor
__SCREAMING_SNAKE_CASE : Any = feed_forward_proj
__SCREAMING_SNAKE_CASE : str = use_cache
__SCREAMING_SNAKE_CASE : List[str] = self.feed_forward_proj.split("""-""" )
__SCREAMING_SNAKE_CASE : Tuple = act_info[-1]
__SCREAMING_SNAKE_CASE : int = act_info[0] == """gated"""
if len(lowerCAmelCase__ ) > 1 and act_info[0] != "gated" or len(lowerCAmelCase__ ) > 2:
raise ValueError(
F"`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer."
"""Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. """
"""'gated-gelu' or 'relu'""" )
# for backwards compatibility
if feed_forward_proj == "gated-gelu":
__SCREAMING_SNAKE_CASE : List[Any] = """gelu_new"""
super().__init__(
pad_token_id=lowerCAmelCase__ , eos_token_id=lowerCAmelCase__ , is_encoder_decoder=lowerCAmelCase__ , **lowerCAmelCase__ , )
class _UpperCamelCase ( lowerCamelCase__ ):
'''simple docstring'''
@property
def UpperCamelCase__ ( self : Tuple ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : List[Any] = {
"""input_ids""": {0: """batch""", 1: """encoder_sequence"""},
"""attention_mask""": {0: """batch""", 1: """encoder_sequence"""},
}
if self.use_past:
__SCREAMING_SNAKE_CASE : Optional[Any] = """past_encoder_sequence + sequence"""
__SCREAMING_SNAKE_CASE : Optional[Any] = {0: """batch"""}
__SCREAMING_SNAKE_CASE : Optional[int] = {0: """batch""", 1: """past_decoder_sequence + sequence"""}
else:
__SCREAMING_SNAKE_CASE : List[Any] = {0: """batch""", 1: """decoder_sequence"""}
__SCREAMING_SNAKE_CASE : Optional[Any] = {0: """batch""", 1: """decoder_sequence"""}
if self.use_past:
self.fill_with_past_key_values_(lowerCAmelCase__ , direction="""inputs""" )
return common_inputs
@property
def UpperCamelCase__ ( self : List[str] ):
"""simple docstring"""
return 1_3
| 178
| 1
|
from collections import namedtuple
import requests
from lxml import html # type: ignore
A_: List[str] = namedtuple('covid_data', 'cases deaths recovered')
def __lowerCAmelCase ( _A = "https://www.worldometers.info/coronavirus/" ):
"""simple docstring"""
_lowercase = """//div[@class = \"maincounter-number\"]/span/text()"""
return covid_data(*html.fromstring(requests.get(A_ ).content ).xpath(A_ ) )
A_: Any = "Total COVID-19 cases in the world: {}\nTotal deaths due to COVID-19 in the world: {}\nTotal COVID-19 patients recovered in the world: {}"
print(fmt.format(*covid_stats()))
| 398
|
'''simple docstring'''
from diffusers.utils.testing_utils import require_onnxruntime
@require_onnxruntime
class _SCREAMING_SNAKE_CASE :
"""simple docstring"""
pass
| 316
| 0
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowercase = logging.get_logger(__name__)
_lowercase = {
'tanreinama/GPTSAN-2.8B-spout_is_uniform': (
'https://huggingface.co/tanreinama/GPTSAN-2.8B-spout_is_uniform/resolve/main/config.json'
),
}
class _lowercase ( __a ):
_UpperCAmelCase = '''gptsan-japanese'''
_UpperCAmelCase = [
'''past_key_values''',
]
_UpperCAmelCase = {
'''hidden_size''': '''d_model''',
'''num_attention_heads''': '''num_heads''',
'''num_hidden_layers''': '''num_layers''',
}
def __init__( self , A__=3_60_00 , A__=12_80 , A__=10_24 , A__=81_92 , A__=40_96 , A__=1_28 , A__=10 , A__=0 , A__=16 , A__=16 , A__=1_28 , A__=0.0 , A__=1e-5 , A__=False , A__=0.0 , A__="float32" , A__=False , A__=False , A__=False , A__=0.0_0_2 , A__=False , A__=True , A__=3_59_98 , A__=3_59_95 , A__=3_59_99 , **A__ , ) -> List[Any]:
snake_case = vocab_size
snake_case = max_position_embeddings
snake_case = d_model
snake_case = d_ff
snake_case = d_ext
snake_case = d_spout
snake_case = num_switch_layers
snake_case = num_ext_layers
snake_case = num_switch_layers + num_ext_layers
snake_case = num_heads
snake_case = num_experts
snake_case = expert_capacity
snake_case = dropout_rate
snake_case = layer_norm_epsilon
snake_case = router_bias
snake_case = router_jitter_noise
snake_case = router_dtype
snake_case = router_ignore_padding_tokens
snake_case = output_hidden_states
snake_case = output_attentions
snake_case = initializer_factor
snake_case = output_router_logits
snake_case = use_cache
super().__init__(
separator_token_id=A__ , pad_token_id=A__ , eos_token_id=A__ , **A__ , )
| 44
|
'''simple docstring'''
from collections import Counter
from pathlib import Path
from typing import Optional, Tuple
import yaml
class _lowercase ( yaml.SafeLoader ):
def UpperCamelCase ( self , A__ ) -> List[str]:
snake_case = [self.constructed_objects[key_node] for key_node, _ in node.value]
snake_case = [tuple(A__ ) if isinstance(A__ , A__ ) else key for key in keys]
snake_case = Counter(A__ )
snake_case = [key for key in counter if counter[key] > 1]
if duplicate_keys:
raise TypeError(F"""Got duplicate yaml keys: {duplicate_keys}""" )
def UpperCamelCase ( self , A__ , A__=False ) -> List[Any]:
snake_case = super().construct_mapping(A__ , deep=A__ )
self._check_no_duplicates_on_constructed_node(A__ )
return mapping
def __UpperCamelCase ( a : str ) ->Tuple[Optional[str], str]:
snake_case = list(readme_content.splitlines() )
if full_content and full_content[0] == "---" and "---" in full_content[1:]:
snake_case = full_content[1:].index('''---''' ) + 1
snake_case = '''\n'''.join(full_content[1:sep_idx] )
return yamlblock, "\n".join(full_content[sep_idx + 1 :] )
return None, "\n".join(a )
class _lowercase ( __a ):
# class attributes
_UpperCAmelCase = {'''train_eval_index'''} # train-eval-index in the YAML metadata
@classmethod
def UpperCamelCase ( cls , A__ ) -> "DatasetMetadata":
with open(A__ , encoding='''utf-8''' ) as readme_file:
snake_case , snake_case = _split_yaml_from_readme(readme_file.read() )
if yaml_string is not None:
return cls.from_yaml_string(A__ )
else:
return cls()
def UpperCamelCase ( self , A__ ) -> str:
if path.exists():
with open(A__ , encoding='''utf-8''' ) as readme_file:
snake_case = readme_file.read()
else:
snake_case = None
snake_case = self._to_readme(A__ )
with open(A__ , '''w''' , encoding='''utf-8''' ) as readme_file:
readme_file.write(A__ )
def UpperCamelCase ( self , A__ = None ) -> str:
if readme_content is not None:
snake_case , snake_case = _split_yaml_from_readme(A__ )
snake_case = '''---\n''' + self.to_yaml_string() + '''---\n''' + content
else:
snake_case = '''---\n''' + self.to_yaml_string() + '''---\n'''
return full_content
@classmethod
def UpperCamelCase ( cls , A__ ) -> "DatasetMetadata":
snake_case = yaml.load(A__ , Loader=_NoDuplicateSafeLoader ) or {}
# Convert the YAML keys to DatasetMetadata fields
snake_case = {
(key.replace('''-''' , '''_''' ) if key.replace('''-''' , '''_''' ) in cls._FIELDS_WITH_DASHES else key): value
for key, value in metadata_dict.items()
}
return cls(**A__ )
def UpperCamelCase ( self ) -> str:
return yaml.safe_dump(
{
(key.replace('''_''' , '''-''' ) if key in self._FIELDS_WITH_DASHES else key): value
for key, value in self.items()
} , sort_keys=A__ , allow_unicode=A__ , encoding='''utf-8''' , ).decode('''utf-8''' )
_lowercase = {
'image-classification': [],
'translation': [],
'image-segmentation': [],
'fill-mask': [],
'automatic-speech-recognition': [],
'token-classification': [],
'sentence-similarity': [],
'audio-classification': [],
'question-answering': [],
'summarization': [],
'zero-shot-classification': [],
'table-to-text': [],
'feature-extraction': [],
'other': [],
'multiple-choice': [],
'text-classification': [],
'text-to-image': [],
'text2text-generation': [],
'zero-shot-image-classification': [],
'tabular-classification': [],
'tabular-regression': [],
'image-to-image': [],
'tabular-to-text': [],
'unconditional-image-generation': [],
'text-retrieval': [],
'text-to-speech': [],
'object-detection': [],
'audio-to-audio': [],
'text-generation': [],
'conversational': [],
'table-question-answering': [],
'visual-question-answering': [],
'image-to-text': [],
'reinforcement-learning': [],
'voice-activity-detection': [],
'time-series-forecasting': [],
'document-question-answering': [],
}
if __name__ == "__main__":
from argparse import ArgumentParser
_lowercase = ArgumentParser(usage='Validate the yaml metadata block of a README.md file.')
ap.add_argument('readme_filepath')
_lowercase = ap.parse_args()
_lowercase = Path(args.readme_filepath)
_lowercase = DatasetMetadata.from_readme(readme_filepath)
print(dataset_metadata)
dataset_metadata.to_readme(readme_filepath)
| 44
| 1
|
'''simple docstring'''
import re
def _SCREAMING_SNAKE_CASE (A ) -> bool:
"""simple docstring"""
lowercase__ = re.compile(R'''^(\+91[\-\s]?)?[0]?(91)?[789]\d{9}$''' )
if match := re.search(A , A ):
return match.string == phone
return False
if __name__ == "__main__":
print(indian_phone_validator('+918827897895'))
| 460
|
'''simple docstring'''
import argparse
import json
import os
from collections import OrderedDict
import numpy as np
import tensorflow as tf
import torch
def _SCREAMING_SNAKE_CASE (A ) -> Dict:
"""simple docstring"""
lowercase__ = os.path.join(args.tf_model_dir , '''parameters.json''' )
lowercase__ = json.loads(open(A ).read() )
if not params:
raise ValueError(
f"It seems that the json file at {parameter_file} is empty. Make sure you have a correct json file." )
if not args.output.endswith('''.pt''' ):
lowercase__ = args.output + '''.pt'''
lowercase__ = OrderedDict()
with tf.device('''/CPU:0''' ):
lowercase__ = tf.train.load_checkpoint(args.tf_model_dir )
lowercase__ = reader.get_variable_to_shape_map()
for key_name in shapes.keys():
lowercase__ = reader.get_tensor(A ).astype(np.floataa )
if key_name.endswith('''/adam_m''' ) or key_name.endswith('''/adam_v''' ):
continue
if key_name.startswith('''pasts/''' ):
if key_name.startswith('''pasts/mlp''' ):
lowercase__ = int(key_name[9] )
elif key_name.startswith('''pasts/out''' ):
lowercase__ = 8
lowercase__ = '''model.sqout.%d.weight''' % (player * 2) # enter to nn.Sequencial with Tanh, so 2 at a time
lowercase__ = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
lowercase__ = torch.tensor(A )
elif key_name.startswith('''model/moe''' ):
lowercase__ = int(key_name[9:].split('''/''' )[0] )
if key_name.endswith('''/switch_gating/kernel''' ):
lowercase__ = '''model.blocks.%d.feed_forward.mlp.router.classifier.weight''' % player
lowercase__ = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
lowercase__ = torch.tensor(A )
elif key_name.endswith('''/softmlp/kernel''' ):
lowercase__ = '''model.blocks.%d.feed_forward.soft_bypass_mlp.weight''' % player
lowercase__ = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
lowercase__ = torch.tensor(A )
elif key_name.endswith('''/wo/kernel''' ) or key_name.endswith('''/wi/kernel''' ):
lowercase__ = key_name[-9:-7]
for i in range(16 ):
lowercase__ = '''model.blocks.%d.feed_forward.mlp.experts.expert_%d.%s.weight''' % (player, i, nlayer)
lowercase__ = (
vnp[i].transpose([1, 0] ).copy()
) # In Mesh-Tensorflow, it is one array, so it is divided
lowercase__ = torch.tensor(A )
elif key_name.startswith('''model/mlp''' ):
lowercase__ = int(key_name[9:].split('''/''' )[0] )
if key_name.endswith('''/p1/kernel''' ):
lowercase__ = '''model.blocks.%d.feed_forward.mlp.wi.weight''' % player
lowercase__ = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
lowercase__ = torch.tensor(A )
elif key_name.endswith('''/p1/bias''' ):
lowercase__ = '''model.blocks.%d.feed_forward.mlp.wi.bias''' % player
lowercase__ = vnp.copy() # same because it is one dimensional
lowercase__ = torch.tensor(A )
elif key_name.endswith('''/p2/kernel''' ):
lowercase__ = '''model.blocks.%d.feed_forward.mlp.wo.weight''' % player
lowercase__ = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
lowercase__ = torch.tensor(A )
elif key_name.endswith('''/p2/bias''' ):
lowercase__ = '''model.blocks.%d.feed_forward.mlp.wo.bias''' % player
lowercase__ = vnp.copy() # same because it is one dimensional
lowercase__ = torch.tensor(A )
elif key_name.startswith('''model/ln''' ):
lowercase__ = int(key_name[8:].split('''/''' )[0] )
if key_name.endswith('''/b''' ):
lowercase__ = '''model.blocks.%d.feed_forward.norm.bias''' % player
lowercase__ = vnp.copy() # same because it is one dimensional
lowercase__ = torch.tensor(A )
elif key_name.endswith('''/g''' ):
lowercase__ = '''model.blocks.%d.feed_forward.norm.weight''' % player
lowercase__ = vnp.copy() # same because it is one dimensional
lowercase__ = torch.tensor(A )
elif key_name.startswith('''model/att''' ):
lowercase__ = int(key_name[9:].split('''/''' )[0] )
if key_name.endswith('''/qkv/kernel''' ):
lowercase__ = vnp.copy() # Compute same dimension as Mesh-tensorflow using einsum
lowercase__ = state[:, 0, :, :]
lowercase__ = state[:, 1, :, :]
lowercase__ = state[:, 2, :, :]
lowercase__ = (
state_q.reshape([state_q.shape[0], state_q.shape[1] * state_q.shape[2]] )
.transpose([1, 0] )
.copy()
) # Mesh-Tensorflow is a diagonal matrix
lowercase__ = (
state_k.reshape([state_k.shape[0], state_k.shape[1] * state_k.shape[2]] )
.transpose([1, 0] )
.copy()
) # Mesh-Tensorflow is a diagonal matrix
lowercase__ = (
state_v.reshape([state_v.shape[0], state_v.shape[1] * state_v.shape[2]] )
.transpose([1, 0] )
.copy()
) # Mesh-Tensorflow is a diagonal matrix
lowercase__ = '''model.blocks.%d.self_attn.self_attn.q_proj.weight''' % player
lowercase__ = torch.tensor(A )
lowercase__ = '''model.blocks.%d.self_attn.self_attn.k_proj.weight''' % player
lowercase__ = torch.tensor(A )
lowercase__ = '''model.blocks.%d.self_attn.self_attn.v_proj.weight''' % player
lowercase__ = torch.tensor(A )
elif key_name.endswith('''/o/kernel''' ):
lowercase__ = '''model.blocks.%d.self_attn.self_attn.out_proj.weight''' % player
lowercase__ = (
vnp.reshape([vnp.shape[0] * vnp.shape[1], vnp.shape[2]] ).transpose([1, 0] ).copy()
) # Mesh-Tensorflow is a diagonal matrix
lowercase__ = torch.tensor(A )
elif key_name.startswith('''model/an''' ):
lowercase__ = int(key_name[8:].split('''/''' )[0] )
if key_name.endswith('''/b''' ):
lowercase__ = '''model.blocks.%d.self_attn.norm.bias''' % player
lowercase__ = vnp.copy() # same because it is one dimensional
lowercase__ = torch.tensor(A )
elif key_name.endswith('''/g''' ):
lowercase__ = '''model.blocks.%d.self_attn.norm.weight''' % player
lowercase__ = vnp.copy() # same because it is one dimensional
lowercase__ = torch.tensor(A )
elif (
key_name.startswith('''model/wte''' )
or key_name.startswith('''model/wpe''' )
or key_name.startswith('''model/ete''' )
):
lowercase__ = {'''wte''': '''embed_tokens''', '''wpe''': '''position_embeddings''', '''ete''': '''extra_position_embeddings'''}[
key_name[-3:]
]
lowercase__ = '''model.%s.weight''' % nlayer
lowercase__ = vnp.copy() # same in embedded
lowercase__ = torch.tensor(A )
if key_name.startswith('''model/wte''' ):
lowercase__ = '''lm_head.weight'''
lowercase__ = vnp.copy() # same in embedded
lowercase__ = torch.tensor(A )
elif key_name.startswith('''model/wob''' ):
lowercase__ = '''final_logits_bias'''
lowercase__ = vnp.copy() # same in embedded
lowercase__ = state.reshape((1, -1) )
lowercase__ = torch.tensor(A )
elif key_name == "model/dense/kernel":
lowercase__ = '''model.last_project.weight'''
lowercase__ = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
lowercase__ = torch.tensor(A )
elif key_name == "model/dense_1/bias":
lowercase__ = '''model.last_project.bias'''
lowercase__ = vnp.copy() # same because it is one dimensional
lowercase__ = torch.tensor(A )
torch.save(A , args.output )
if __name__ == "__main__":
lowerCamelCase : Optional[int] = argparse.ArgumentParser(
description='model converter.', formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
parser.add_argument('--tf_model_dir', metavar='PATH', type=str, required=True, help='import model')
parser.add_argument('--output', metavar='PATH', type=str, required=True, help='output model')
lowerCamelCase : Dict = parser.parse_args()
convert_tf_gptsan_to_pt(args)
| 460
| 1
|
"""simple docstring"""
import os
import re
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
__lowercase : List[str] = logging.get_logger(__name__)
__lowercase : Union[str, Any] = {
"vocab_file": "vocab.txt",
"merges_file": "bpe.codes",
}
__lowercase : Optional[int] = {
"vocab_file": {
"vinai/phobert-base": "https://huggingface.co/vinai/phobert-base/resolve/main/vocab.txt",
"vinai/phobert-large": "https://huggingface.co/vinai/phobert-large/resolve/main/vocab.txt",
},
"merges_file": {
"vinai/phobert-base": "https://huggingface.co/vinai/phobert-base/resolve/main/bpe.codes",
"vinai/phobert-large": "https://huggingface.co/vinai/phobert-large/resolve/main/bpe.codes",
},
}
__lowercase : Dict = {
"vinai/phobert-base": 256,
"vinai/phobert-large": 256,
}
def SCREAMING_SNAKE_CASE ( snake_case):
__snake_case = set()
__snake_case = word[0]
for char in word[1:]:
pairs.add((prev_char, char))
__snake_case = char
__snake_case = set(snake_case)
return pairs
class _A ( _UpperCAmelCase ):
"""simple docstring"""
UpperCamelCase_ : Union[str, Any] = VOCAB_FILES_NAMES
UpperCamelCase_ : Any = PRETRAINED_VOCAB_FILES_MAP
UpperCamelCase_ : Optional[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
def __init__( self : Union[str, Any] , A_ : Optional[Any] , A_ : Optional[int] , A_ : str="<s>" , A_ : List[str]="</s>" , A_ : List[Any]="</s>" , A_ : List[str]="<s>" , A_ : Optional[int]="<unk>" , A_ : List[str]="<pad>" , A_ : Any="<mask>" , **A_ : int , ) -> Tuple:
super().__init__(
bos_token=A_ , eos_token=A_ , unk_token=A_ , sep_token=A_ , cls_token=A_ , pad_token=A_ , mask_token=A_ , **A_ , )
__snake_case = vocab_file
__snake_case = merges_file
__snake_case = {}
__snake_case = 0
__snake_case = 1
__snake_case = 2
__snake_case = 3
self.add_from_file(A_ )
__snake_case = {v: k for k, v in self.encoder.items()}
with open(A_ , encoding='''utf-8''' ) as merges_handle:
__snake_case = merges_handle.read().split('''\n''' )[:-1]
__snake_case = [tuple(merge.split()[:-1] ) for merge in merges]
__snake_case = dict(zip(A_ , range(len(A_ ) ) ) )
__snake_case = {}
def lowercase ( self : str , A_ : List[int] , A_ : Optional[List[int]] = None ) -> List[int]:
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
__snake_case = [self.cls_token_id]
__snake_case = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def lowercase ( self : Dict , A_ : List[int] , A_ : Optional[List[int]] = None , A_ : bool = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=A_ , token_ids_a=A_ , already_has_special_tokens=A_ )
if token_ids_a is None:
return [1] + ([0] * len(A_ )) + [1]
return [1] + ([0] * len(A_ )) + [1, 1] + ([0] * len(A_ )) + [1]
def lowercase ( self : Optional[Any] , A_ : List[int] , A_ : Optional[List[int]] = None ) -> List[int]:
__snake_case = [self.sep_token_id]
__snake_case = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def lowercase ( self : List[Any] ) -> str:
return len(self.encoder )
def lowercase ( self : Union[str, Any] ) -> Optional[int]:
return dict(self.encoder , **self.added_tokens_encoder )
def lowercase ( self : str , A_ : List[Any] ) -> Optional[int]:
if token in self.cache:
return self.cache[token]
__snake_case = tuple(A_ )
__snake_case = tuple(list(word[:-1] ) + [word[-1] + '''</w>'''] )
__snake_case = get_pairs(A_ )
if not pairs:
return token
while True:
__snake_case = min(A_ , key=lambda A_ : self.bpe_ranks.get(A_ , float('''inf''' ) ) )
if bigram not in self.bpe_ranks:
break
__snake_case , __snake_case = bigram
__snake_case = []
__snake_case = 0
while i < len(A_ ):
try:
__snake_case = word.index(A_ , A_ )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
__snake_case = j
if word[i] == first and i < len(A_ ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
__snake_case = tuple(A_ )
__snake_case = new_word
if len(A_ ) == 1:
break
else:
__snake_case = get_pairs(A_ )
__snake_case = '''@@ '''.join(A_ )
__snake_case = word[:-4]
__snake_case = word
return word
def lowercase ( self : int , A_ : Tuple ) -> int:
__snake_case = []
__snake_case = re.findall(R'''\S+\n?''' , A_ )
for token in words:
split_tokens.extend(list(self.bpe(A_ ).split(''' ''' ) ) )
return split_tokens
def lowercase ( self : Union[str, Any] , A_ : Tuple ) -> int:
return self.encoder.get(A_ , self.encoder.get(self.unk_token ) )
def lowercase ( self : int , A_ : Dict ) -> Optional[Any]:
return self.decoder.get(A_ , self.unk_token )
def lowercase ( self : Union[str, Any] , A_ : int ) -> Tuple:
__snake_case = ''' '''.join(A_ ).replace('''@@ ''' , '''''' ).strip()
return out_string
def lowercase ( self : str , A_ : str , A_ : Optional[str] = None ) -> Tuple[str]:
if not os.path.isdir(A_ ):
logger.error(f"Vocabulary path ({save_directory}) should be a directory" )
return
__snake_case = os.path.join(
A_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
__snake_case = os.path.join(
A_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''merges_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(A_ ):
copyfile(self.vocab_file , A_ )
if os.path.abspath(self.merges_file ) != os.path.abspath(A_ ):
copyfile(self.merges_file , A_ )
return out_vocab_file, out_merge_file
def lowercase ( self : Optional[int] , A_ : Union[str, Any] ) -> Dict:
if isinstance(A_ , A_ ):
try:
with open(A_ , '''r''' , encoding='''utf-8''' ) as fd:
self.add_from_file(A_ )
except FileNotFoundError as fnfe:
raise fnfe
except UnicodeError:
raise Exception(f"Incorrect encoding detected in {f}, please rebuild the dataset" )
return
__snake_case = f.readlines()
for lineTmp in lines:
__snake_case = lineTmp.strip()
__snake_case = line.rfind(''' ''' )
if idx == -1:
raise ValueError('''Incorrect dictionary format, expected \'<token> <cnt>\'''' )
__snake_case = line[:idx]
__snake_case = len(self.encoder )
| 93
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
__lowercase : Dict = {
"configuration_vision_text_dual_encoder": ["VisionTextDualEncoderConfig"],
"processing_vision_text_dual_encoder": ["VisionTextDualEncoderProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase : Optional[int] = ["VisionTextDualEncoderModel"]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase : Optional[int] = ["FlaxVisionTextDualEncoderModel"]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase : Any = ["TFVisionTextDualEncoderModel"]
if TYPE_CHECKING:
from .configuration_vision_text_dual_encoder import VisionTextDualEncoderConfig
from .processing_vision_text_dual_encoder import VisionTextDualEncoderProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vision_text_dual_encoder import VisionTextDualEncoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vision_text_dual_encoder import FlaxVisionTextDualEncoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vision_text_dual_encoder import TFVisionTextDualEncoderModel
else:
import sys
__lowercase : Tuple = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 93
| 1
|
"""simple docstring"""
import unittest
from transformers import EsmConfig, is_torch_available
from transformers.testing_utils import TestCasePlus, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers.models.esm.modeling_esmfold import EsmForProteinFolding
class __A :
def __init__( self : str , __snake_case : int , __snake_case : Dict=1_3 , __snake_case : Dict=7 , __snake_case : str=False , __snake_case : Dict=True , __snake_case : Tuple=False , __snake_case : List[Any]=False , __snake_case : Dict=1_9 , __snake_case : Dict=3_2 , __snake_case : Union[str, Any]=5 , __snake_case : List[Any]=4 , __snake_case : List[Any]=3_7 , __snake_case : Union[str, Any]="gelu" , __snake_case : List[str]=0.1 , __snake_case : Union[str, Any]=0.1 , __snake_case : List[str]=5_1_2 , __snake_case : List[str]=1_6 , __snake_case : Tuple=2 , __snake_case : Optional[Any]=0.02 , __snake_case : str=3 , __snake_case : List[str]=4 , __snake_case : Optional[int]=None , ) -> Optional[int]:
__magic_name__: Any = parent
__magic_name__: Optional[Any] = batch_size
__magic_name__: Optional[Any] = seq_length
__magic_name__: Optional[Any] = is_training
__magic_name__: str = use_input_mask
__magic_name__: Optional[Any] = use_token_type_ids
__magic_name__: List[str] = use_labels
__magic_name__: List[str] = vocab_size
__magic_name__: Union[str, Any] = hidden_size
__magic_name__: List[Any] = num_hidden_layers
__magic_name__: Dict = num_attention_heads
__magic_name__: int = intermediate_size
__magic_name__: List[Any] = hidden_act
__magic_name__: int = hidden_dropout_prob
__magic_name__: List[str] = attention_probs_dropout_prob
__magic_name__: Tuple = max_position_embeddings
__magic_name__: Any = type_vocab_size
__magic_name__: Union[str, Any] = type_sequence_label_size
__magic_name__: List[str] = initializer_range
__magic_name__: Optional[Any] = num_labels
__magic_name__: List[str] = num_choices
__magic_name__: Tuple = scope
def lowerCamelCase__ ( self : Optional[int] ) -> Any:
__magic_name__: Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__magic_name__: Dict = None
if self.use_input_mask:
__magic_name__: Optional[int] = random_attention_mask([self.batch_size, self.seq_length] )
__magic_name__: str = None
__magic_name__: Optional[Any] = None
__magic_name__: int = None
if self.use_labels:
__magic_name__: List[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__magic_name__: str = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__magic_name__: Dict = ids_tensor([self.batch_size] , self.num_choices )
__magic_name__: Dict = self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def lowerCamelCase__ ( self : int ) -> Any:
__magic_name__: List[Any] = EsmConfig(
vocab_size=3_3 , hidden_size=self.hidden_size , pad_token_id=1 , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , is_folding_model=__snake_case , esmfold_config={"""trunk""": {"""num_blocks""": 2}, """fp16_esm""": False} , )
return config
def lowerCamelCase__ ( self : Optional[int] , __snake_case : Optional[Any] , __snake_case : Optional[Any] , __snake_case : Any , __snake_case : List[Any] , __snake_case : Any , __snake_case : Union[str, Any] ) -> Optional[Any]:
__magic_name__: str = EsmForProteinFolding(config=__snake_case ).float()
model.to(__snake_case )
model.eval()
__magic_name__: str = model(__snake_case , attention_mask=__snake_case )
__magic_name__: Any = model(__snake_case )
__magic_name__: Dict = model(__snake_case )
self.parent.assertEqual(result.positions.shape , (8, self.batch_size, self.seq_length, 1_4, 3) )
self.parent.assertEqual(result.angles.shape , (8, self.batch_size, self.seq_length, 7, 2) )
def lowerCamelCase__ ( self : Any ) -> int:
__magic_name__: str = self.prepare_config_and_inputs()
(
(
__magic_name__
), (
__magic_name__
), (
__magic_name__
), (
__magic_name__
), (
__magic_name__
), (
__magic_name__
),
): Union[str, Any] = config_and_inputs
__magic_name__: Any = {"""input_ids""": input_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_torch
class __A ( SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,unittest.TestCase ):
UpperCAmelCase__ = False
UpperCAmelCase__ = (EsmForProteinFolding,) if is_torch_available() else ()
UpperCAmelCase__ = ()
UpperCAmelCase__ = {} if is_torch_available() else {}
UpperCAmelCase__ = False
def lowerCamelCase__ ( self : List[Any] ) -> Optional[Any]:
__magic_name__: Optional[int] = EsmFoldModelTester(self )
__magic_name__: Optional[Any] = ConfigTester(self , config_class=__snake_case , hidden_size=3_7 )
def lowerCamelCase__ ( self : Optional[int] ) -> Any:
self.config_tester.run_common_tests()
def lowerCamelCase__ ( self : List[str] ) -> str:
__magic_name__: Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__snake_case )
@unittest.skip("""Does not support attention outputs""" )
def lowerCamelCase__ ( self : List[str] ) -> str:
pass
@unittest.skip
def lowerCamelCase__ ( self : List[Any] ) -> Tuple:
pass
@unittest.skip("""Esm does not support embedding resizing""" )
def lowerCamelCase__ ( self : List[Any] ) -> Optional[int]:
pass
@unittest.skip("""Esm does not support embedding resizing""" )
def lowerCamelCase__ ( self : str ) -> str:
pass
@unittest.skip("""ESMFold does not support passing input embeds!""" )
def lowerCamelCase__ ( self : Optional[int] ) -> List[Any]:
pass
@unittest.skip("""ESMFold does not support head pruning.""" )
def lowerCamelCase__ ( self : Any ) -> List[str]:
pass
@unittest.skip("""ESMFold does not support head pruning.""" )
def lowerCamelCase__ ( self : Union[str, Any] ) -> List[Any]:
pass
@unittest.skip("""ESMFold does not support head pruning.""" )
def lowerCamelCase__ ( self : int ) -> Tuple:
pass
@unittest.skip("""ESMFold does not support head pruning.""" )
def lowerCamelCase__ ( self : Optional[int] ) -> Optional[Any]:
pass
@unittest.skip("""ESMFold does not support head pruning.""" )
def lowerCamelCase__ ( self : Optional[Any] ) -> Optional[Any]:
pass
@unittest.skip("""ESMFold does not output hidden states in the normal way.""" )
def lowerCamelCase__ ( self : Union[str, Any] ) -> List[Any]:
pass
@unittest.skip("""ESMfold does not output hidden states in the normal way.""" )
def lowerCamelCase__ ( self : Any ) -> int:
pass
@unittest.skip("""ESMFold only has one output format.""" )
def lowerCamelCase__ ( self : Union[str, Any] ) -> str:
pass
@unittest.skip("""This test doesn't work for ESMFold and doesn't test core functionality""" )
def lowerCamelCase__ ( self : List[Any] ) -> Dict:
pass
@unittest.skip("""ESMFold does not support input chunking.""" )
def lowerCamelCase__ ( self : int ) -> Any:
pass
@unittest.skip("""ESMFold doesn't respect you and it certainly doesn't respect your initialization arguments.""" )
def lowerCamelCase__ ( self : int ) -> Union[str, Any]:
pass
@unittest.skip("""ESMFold doesn't support torchscript compilation.""" )
def lowerCamelCase__ ( self : Any ) -> Optional[Any]:
pass
@unittest.skip("""ESMFold doesn't support torchscript compilation.""" )
def lowerCamelCase__ ( self : Optional[int] ) -> int:
pass
@unittest.skip("""ESMFold doesn't support torchscript compilation.""" )
def lowerCamelCase__ ( self : Union[str, Any] ) -> List[str]:
pass
@unittest.skip("""ESMFold doesn't support data parallel.""" )
def lowerCamelCase__ ( self : List[str] ) -> str:
pass
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def lowerCamelCase__ ( self : Union[str, Any] ) -> Dict:
pass
@require_torch
class __A ( SCREAMING_SNAKE_CASE_ ):
@slow
def lowerCamelCase__ ( self : Optional[Any] ) -> List[Any]:
__magic_name__: Any = EsmForProteinFolding.from_pretrained("""facebook/esmfold_v1""" ).float()
model.eval()
__magic_name__: Optional[int] = torch.tensor([[0, 6, 4, 1_3, 5, 4, 1_6, 1_2, 1_1, 7, 2]] )
__magic_name__: Dict = model(__snake_case )["""positions"""]
__magic_name__: Tuple = torch.tensor([2.5828, 0.7993, -10.9334] , dtype=torch.floataa )
self.assertTrue(torch.allclose(position_outputs[0, 0, 0, 0] , __snake_case , atol=1E-4 ) )
| 96
|
"""simple docstring"""
import json
import os
import shutil
import tempfile
import unittest
from multiprocessing import get_context
from pathlib import Path
import datasets
import numpy as np
from datasets import load_dataset
from parameterized import parameterized
from transformers import AutoProcessor
from transformers.models.wavaveca import WavaVecaCTCTokenizer, WavaVecaFeatureExtractor
from transformers.models.wavaveca.tokenization_wavaveca import VOCAB_FILES_NAMES
from transformers.testing_utils import require_pyctcdecode, require_torch, require_torchaudio, slow
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_pyctcdecode_available, is_torch_available
from ..wavaveca.test_feature_extraction_wavaveca import floats_list
if is_pyctcdecode_available():
from huggingface_hub import snapshot_download
from pyctcdecode import BeamSearchDecoderCTC
from transformers.models.wavaveca_with_lm import WavaVecaProcessorWithLM
from transformers.models.wavaveca_with_lm.processing_wavaveca_with_lm import WavaVecaDecoderWithLMOutput
if is_torch_available():
from transformers import WavaVecaForCTC
@require_pyctcdecode
class __A ( unittest.TestCase ):
def lowerCamelCase__ ( self : Union[str, Any] ) -> Optional[Any]:
__magic_name__: List[str] = """| <pad> <unk> <s> </s> a b c d e f g h i j k""".split()
__magic_name__: List[Any] = dict(zip(__snake_case , range(len(__snake_case ) ) ) )
__magic_name__: Union[str, Any] = {
"""unk_token""": """<unk>""",
"""bos_token""": """<s>""",
"""eos_token""": """</s>""",
}
__magic_name__: Optional[int] = {
"""feature_size""": 1,
"""padding_value""": 0.0,
"""sampling_rate""": 1_6_0_0_0,
"""return_attention_mask""": False,
"""do_normalize""": True,
}
__magic_name__: int = tempfile.mkdtemp()
__magic_name__: Any = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
__magic_name__: Tuple = os.path.join(self.tmpdirname , __snake_case )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write(json.dumps(__snake_case ) + """\n""" )
with open(self.feature_extraction_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write(json.dumps(__snake_case ) + """\n""" )
# load decoder from hub
__magic_name__: Dict = """hf-internal-testing/ngram-beam-search-decoder"""
def lowerCamelCase__ ( self : Any , **__snake_case : str ) -> Optional[int]:
__magic_name__: Union[str, Any] = self.add_kwargs_tokens_map.copy()
kwargs.update(__snake_case )
return WavaVecaCTCTokenizer.from_pretrained(self.tmpdirname , **__snake_case )
def lowerCamelCase__ ( self : str , **__snake_case : int ) -> Dict:
return WavaVecaFeatureExtractor.from_pretrained(self.tmpdirname , **__snake_case )
def lowerCamelCase__ ( self : int , **__snake_case : List[str] ) -> int:
return BeamSearchDecoderCTC.load_from_hf_hub(self.decoder_name , **__snake_case )
def lowerCamelCase__ ( self : Union[str, Any] ) -> Any:
shutil.rmtree(self.tmpdirname )
def lowerCamelCase__ ( self : List[Any] ) -> Optional[Any]:
__magic_name__: Dict = self.get_tokenizer()
__magic_name__: Any = self.get_feature_extractor()
__magic_name__: Tuple = self.get_decoder()
__magic_name__: Tuple = WavaVecaProcessorWithLM(tokenizer=__snake_case , feature_extractor=__snake_case , decoder=__snake_case )
processor.save_pretrained(self.tmpdirname )
__magic_name__: Dict = WavaVecaProcessorWithLM.from_pretrained(self.tmpdirname )
# tokenizer
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.tokenizer , __snake_case )
# feature extractor
self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor.to_json_string() )
self.assertIsInstance(processor.feature_extractor , __snake_case )
# decoder
self.assertEqual(processor.decoder._alphabet.labels , decoder._alphabet.labels )
self.assertEqual(
processor.decoder.model_container[decoder._model_key]._unigram_set , decoder.model_container[decoder._model_key]._unigram_set , )
self.assertIsInstance(processor.decoder , __snake_case )
def lowerCamelCase__ ( self : Any ) -> Tuple:
__magic_name__: Union[str, Any] = WavaVecaProcessorWithLM(
tokenizer=self.get_tokenizer() , feature_extractor=self.get_feature_extractor() , decoder=self.get_decoder() )
processor.save_pretrained(self.tmpdirname )
# make sure that error is thrown when decoder alphabet doesn't match
__magic_name__: int = WavaVecaProcessorWithLM.from_pretrained(
self.tmpdirname , alpha=5.0 , beta=3.0 , score_boundary=-7.0 , unk_score_offset=3 )
# decoder
self.assertEqual(processor.language_model.alpha , 5.0 )
self.assertEqual(processor.language_model.beta , 3.0 )
self.assertEqual(processor.language_model.score_boundary , -7.0 )
self.assertEqual(processor.language_model.unk_score_offset , 3 )
def lowerCamelCase__ ( self : Optional[int] ) -> Optional[Any]:
__magic_name__: Union[str, Any] = self.get_tokenizer()
# add token to trigger raise
tokenizer.add_tokens(["""xx"""] )
with self.assertRaisesRegex(__snake_case , """include""" ):
WavaVecaProcessorWithLM(
tokenizer=__snake_case , feature_extractor=self.get_feature_extractor() , decoder=self.get_decoder() )
def lowerCamelCase__ ( self : Union[str, Any] ) -> int:
__magic_name__: int = self.get_feature_extractor()
__magic_name__: Optional[Any] = self.get_tokenizer()
__magic_name__: List[Any] = self.get_decoder()
__magic_name__: int = WavaVecaProcessorWithLM(tokenizer=__snake_case , feature_extractor=__snake_case , decoder=__snake_case )
__magic_name__: Tuple = floats_list((3, 1_0_0_0) )
__magic_name__: List[str] = feature_extractor(__snake_case , return_tensors="""np""" )
__magic_name__: Tuple = processor(__snake_case , return_tensors="""np""" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
def lowerCamelCase__ ( self : Union[str, Any] ) -> List[str]:
__magic_name__: Tuple = self.get_feature_extractor()
__magic_name__: List[str] = self.get_tokenizer()
__magic_name__: str = self.get_decoder()
__magic_name__: Tuple = WavaVecaProcessorWithLM(tokenizer=__snake_case , feature_extractor=__snake_case , decoder=__snake_case )
__magic_name__: Optional[int] = """This is a test string"""
__magic_name__: List[str] = processor(text=__snake_case )
__magic_name__: Tuple = tokenizer(__snake_case )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def lowerCamelCase__ ( self : int , __snake_case : List[str]=(2, 1_0, 1_6) , __snake_case : List[Any]=7_7 ) -> Dict:
np.random.seed(__snake_case )
return np.random.rand(*__snake_case )
def lowerCamelCase__ ( self : Any ) -> Any:
__magic_name__: int = self.get_feature_extractor()
__magic_name__: Tuple = self.get_tokenizer()
__magic_name__: Any = self.get_decoder()
__magic_name__: Tuple = WavaVecaProcessorWithLM(tokenizer=__snake_case , feature_extractor=__snake_case , decoder=__snake_case )
__magic_name__: List[Any] = self._get_dummy_logits(shape=(1_0, 1_6) , seed=1_3 )
__magic_name__: str = processor.decode(__snake_case )
__magic_name__: Optional[int] = decoder.decode_beams(__snake_case )[0]
self.assertEqual(decoded_decoder[0] , decoded_processor.text )
self.assertEqual("""</s> <s> </s>""" , decoded_processor.text )
self.assertEqual(decoded_decoder[-2] , decoded_processor.logit_score )
self.assertEqual(decoded_decoder[-1] , decoded_processor.lm_score )
@parameterized.expand([[None], ["""fork"""], ["""spawn"""]] )
def lowerCamelCase__ ( self : int , __snake_case : Dict ) -> Any:
__magic_name__: int = self.get_feature_extractor()
__magic_name__: List[Any] = self.get_tokenizer()
__magic_name__: int = self.get_decoder()
__magic_name__: Optional[Any] = WavaVecaProcessorWithLM(tokenizer=__snake_case , feature_extractor=__snake_case , decoder=__snake_case )
__magic_name__: Optional[int] = self._get_dummy_logits()
# note: pool should be instantiated *after* Wav2Vec2ProcessorWithLM.
# otherwise, the LM won't be available to the pool's sub-processes.
# manual logic used to allow parameterized test for both pool=None and pool=Pool(...)
if pool_context is None:
__magic_name__: Optional[int] = processor.batch_decode(__snake_case )
else:
with get_context(__snake_case ).Pool() as pool:
__magic_name__: Any = processor.batch_decode(__snake_case , __snake_case )
__magic_name__: Dict = list(__snake_case )
with get_context("""fork""" ).Pool() as p:
__magic_name__: List[str] = decoder.decode_beams_batch(__snake_case , __snake_case )
__magic_name__, __magic_name__, __magic_name__: Optional[int] = [], [], []
for beams in decoded_beams:
texts_decoder.append(beams[0][0] )
logit_scores_decoder.append(beams[0][-2] )
lm_scores_decoder.append(beams[0][-1] )
self.assertListEqual(__snake_case , decoded_processor.text )
self.assertListEqual(["""<s> <s> </s>""", """<s> <s> <s>"""] , decoded_processor.text )
self.assertListEqual(__snake_case , decoded_processor.logit_score )
self.assertListEqual(__snake_case , decoded_processor.lm_score )
def lowerCamelCase__ ( self : List[Any] ) -> List[Any]:
__magic_name__: List[str] = self.get_feature_extractor()
__magic_name__: Optional[Any] = self.get_tokenizer()
__magic_name__: Optional[int] = self.get_decoder()
__magic_name__: Dict = WavaVecaProcessorWithLM(tokenizer=__snake_case , feature_extractor=__snake_case , decoder=__snake_case )
__magic_name__: str = self._get_dummy_logits()
__magic_name__: Dict = 1_5
__magic_name__: int = -20.0
__magic_name__: int = -4.0
__magic_name__: Dict = processor.batch_decode(
__snake_case , beam_width=__snake_case , beam_prune_logp=__snake_case , token_min_logp=__snake_case , )
__magic_name__: Optional[int] = decoded_processor_out.text
__magic_name__: Union[str, Any] = list(__snake_case )
with get_context("""fork""" ).Pool() as pool:
__magic_name__: str = decoder.decode_beams_batch(
__snake_case , __snake_case , beam_width=__snake_case , beam_prune_logp=__snake_case , token_min_logp=__snake_case , )
__magic_name__: Any = [d[0][0] for d in decoded_decoder_out]
__magic_name__: Optional[int] = [d[0][2] for d in decoded_decoder_out]
__magic_name__: Optional[Any] = [d[0][3] for d in decoded_decoder_out]
self.assertListEqual(__snake_case , __snake_case )
self.assertListEqual(["""</s> <s> <s>""", """<s> <s> <s>"""] , __snake_case )
self.assertTrue(np.array_equal(__snake_case , decoded_processor_out.logit_score ) )
self.assertTrue(np.allclose([-20.054, -18.447] , __snake_case , atol=1E-3 ) )
self.assertTrue(np.array_equal(__snake_case , decoded_processor_out.lm_score ) )
self.assertTrue(np.allclose([-15.554, -13.9474] , __snake_case , atol=1E-3 ) )
def lowerCamelCase__ ( self : Union[str, Any] ) -> int:
__magic_name__: int = self.get_feature_extractor()
__magic_name__: Any = self.get_tokenizer()
__magic_name__: Union[str, Any] = self.get_decoder()
__magic_name__: str = WavaVecaProcessorWithLM(tokenizer=__snake_case , feature_extractor=__snake_case , decoder=__snake_case )
__magic_name__: Any = self._get_dummy_logits()
__magic_name__: Union[str, Any] = 2.0
__magic_name__: Optional[Any] = 5.0
__magic_name__: Optional[Any] = -20.0
__magic_name__: List[str] = True
__magic_name__: List[Any] = processor.batch_decode(
__snake_case , alpha=__snake_case , beta=__snake_case , unk_score_offset=__snake_case , lm_score_boundary=__snake_case , )
__magic_name__: Union[str, Any] = decoded_processor_out.text
__magic_name__: Union[str, Any] = list(__snake_case )
decoder.reset_params(
alpha=__snake_case , beta=__snake_case , unk_score_offset=__snake_case , lm_score_boundary=__snake_case , )
with get_context("""fork""" ).Pool() as pool:
__magic_name__: str = decoder.decode_beams_batch(
__snake_case , __snake_case , )
__magic_name__: List[str] = [d[0][0] for d in decoded_decoder_out]
self.assertListEqual(__snake_case , __snake_case )
self.assertListEqual(["""<s> </s> <s> </s> </s>""", """</s> </s> <s> </s> </s>"""] , __snake_case )
__magic_name__: List[str] = processor.decoder.model_container[processor.decoder._model_key]
self.assertEqual(lm_model.alpha , 2.0 )
self.assertEqual(lm_model.beta , 5.0 )
self.assertEqual(lm_model.unk_score_offset , -20.0 )
self.assertEqual(lm_model.score_boundary , __snake_case )
def lowerCamelCase__ ( self : Union[str, Any] ) -> List[Any]:
__magic_name__: List[Any] = WavaVecaProcessorWithLM.from_pretrained("""hf-internal-testing/processor_with_lm""" )
__magic_name__: Union[str, Any] = processor.decoder.model_container[processor.decoder._model_key]
__magic_name__: Union[str, Any] = Path(language_model._kenlm_model.path.decode("""utf-8""" ) ).parent.parent.absolute()
__magic_name__: Optional[int] = os.listdir(__snake_case )
__magic_name__: Union[str, Any] = ["""alphabet.json""", """language_model"""]
downloaded_decoder_files.sort()
expected_decoder_files.sort()
# test that only decoder relevant files from
# https://huggingface.co/hf-internal-testing/processor_with_lm/tree/main
# are downloaded and none of the rest (e.g. README.md, ...)
self.assertListEqual(__snake_case , __snake_case )
def lowerCamelCase__ ( self : Any ) -> Any:
__magic_name__: int = snapshot_download("""hf-internal-testing/processor_with_lm""" )
__magic_name__: List[Any] = WavaVecaProcessorWithLM.from_pretrained(__snake_case )
__magic_name__: Any = processor.decoder.model_container[processor.decoder._model_key]
__magic_name__: int = Path(language_model._kenlm_model.path.decode("""utf-8""" ) ).parent.parent.absolute()
__magic_name__: str = os.listdir(__snake_case )
__magic_name__: Tuple = os.listdir(__snake_case )
local_decoder_files.sort()
expected_decoder_files.sort()
# test that both decoder form hub and local files in cache are the same
self.assertListEqual(__snake_case , __snake_case )
def lowerCamelCase__ ( self : Optional[int] ) -> int:
__magic_name__: List[Any] = WavaVecaProcessorWithLM.from_pretrained("""hf-internal-testing/processor_with_lm""" )
__magic_name__: List[str] = AutoProcessor.from_pretrained("""hf-internal-testing/processor_with_lm""" )
__magic_name__: List[str] = floats_list((3, 1_0_0_0) )
__magic_name__: Tuple = processor_wavaveca(__snake_case , return_tensors="""np""" )
__magic_name__: Optional[Any] = processor_auto(__snake_case , return_tensors="""np""" )
for key in input_wavaveca.keys():
self.assertAlmostEqual(input_wavaveca[key].sum() , input_auto[key].sum() , delta=1E-2 )
__magic_name__: int = self._get_dummy_logits()
__magic_name__: List[Any] = processor_wavaveca.batch_decode(__snake_case )
__magic_name__: Union[str, Any] = processor_auto.batch_decode(__snake_case )
self.assertListEqual(decoded_wavaveca.text , decoded_auto.text )
def lowerCamelCase__ ( self : Union[str, Any] ) -> str:
__magic_name__: Optional[int] = self.get_feature_extractor()
__magic_name__: Any = self.get_tokenizer()
__magic_name__: Dict = self.get_decoder()
__magic_name__: List[str] = WavaVecaProcessorWithLM(tokenizer=__snake_case , feature_extractor=__snake_case , decoder=__snake_case )
self.assertListEqual(
processor.model_input_names , feature_extractor.model_input_names , msg="""`processor` and `feature_extractor` model input names do not match""" , )
@staticmethod
def lowerCamelCase__ ( __snake_case : Optional[int] , __snake_case : int ) -> int:
__magic_name__: Any = [d[key] for d in offsets]
return retrieved_list
def lowerCamelCase__ ( self : str ) -> Union[str, Any]:
__magic_name__: Tuple = WavaVecaProcessorWithLM.from_pretrained("""hf-internal-testing/processor_with_lm""" )
__magic_name__: Tuple = self._get_dummy_logits()[0]
__magic_name__: List[Any] = processor.decode(__snake_case , output_word_offsets=__snake_case )
# check Wav2Vec2CTCTokenizerOutput keys for word
self.assertEqual(len(outputs.keys() ) , 4 )
self.assertTrue("""text""" in outputs )
self.assertTrue("""word_offsets""" in outputs )
self.assertTrue(isinstance(__snake_case , __snake_case ) )
self.assertEqual(""" """.join(self.get_from_offsets(outputs["""word_offsets"""] , """word""" ) ) , outputs.text )
self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""] , """word""" ) , ["""<s>""", """<s>""", """</s>"""] )
self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""] , """start_offset""" ) , [0, 2, 4] )
self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""] , """end_offset""" ) , [1, 3, 5] )
def lowerCamelCase__ ( self : Optional[int] ) -> Dict:
__magic_name__: Optional[int] = WavaVecaProcessorWithLM.from_pretrained("""hf-internal-testing/processor_with_lm""" )
__magic_name__: Optional[int] = self._get_dummy_logits()
__magic_name__: Any = processor.batch_decode(__snake_case , output_word_offsets=__snake_case )
# check Wav2Vec2CTCTokenizerOutput keys for word
self.assertEqual(len(outputs.keys() ) , 4 )
self.assertTrue("""text""" in outputs )
self.assertTrue("""word_offsets""" in outputs )
self.assertTrue(isinstance(__snake_case , __snake_case ) )
self.assertListEqual(
[""" """.join(self.get_from_offsets(__snake_case , """word""" ) ) for o in outputs["""word_offsets"""]] , outputs.text )
self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""][0] , """word""" ) , ["""<s>""", """<s>""", """</s>"""] )
self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""][0] , """start_offset""" ) , [0, 2, 4] )
self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""][0] , """end_offset""" ) , [1, 3, 5] )
@slow
@require_torch
@require_torchaudio
def lowerCamelCase__ ( self : Union[str, Any] ) -> int:
import torch
__magic_name__: List[Any] = load_dataset("""common_voice""" , """en""" , split="""train""" , streaming=__snake_case )
__magic_name__: Dict = ds.cast_column("""audio""" , datasets.Audio(sampling_rate=1_6_0_0_0 ) )
__magic_name__: Any = iter(__snake_case )
__magic_name__: Optional[int] = next(__snake_case )
__magic_name__: Optional[int] = AutoProcessor.from_pretrained("""patrickvonplaten/wav2vec2-base-100h-with-lm""" )
__magic_name__: Tuple = WavaVecaForCTC.from_pretrained("""patrickvonplaten/wav2vec2-base-100h-with-lm""" )
# compare to filename `common_voice_en_100038.mp3` of dataset viewer on https://huggingface.co/datasets/common_voice/viewer/en/train
__magic_name__: List[str] = processor(sample["""audio"""]["""array"""] , return_tensors="""pt""" ).input_values
with torch.no_grad():
__magic_name__: List[Any] = model(__snake_case ).logits.cpu().numpy()
__magic_name__: Optional[Any] = processor.decode(logits[0] , output_word_offsets=__snake_case )
__magic_name__: List[str] = model.config.inputs_to_logits_ratio / processor.feature_extractor.sampling_rate
__magic_name__: str = [
{
"""start_time""": d["""start_offset"""] * time_offset,
"""end_time""": d["""end_offset"""] * time_offset,
"""word""": d["""word"""],
}
for d in output["""word_offsets"""]
]
__magic_name__: Tuple = """WHY DOES MILISANDRA LOOK LIKE SHE WANTS TO CONSUME JOHN SNOW ON THE RIVER AT THE WALL"""
# output words
self.assertEqual(""" """.join(self.get_from_offsets(__snake_case , """word""" ) ) , __snake_case )
self.assertEqual(""" """.join(self.get_from_offsets(__snake_case , """word""" ) ) , output.text )
# output times
__magic_name__: Dict = torch.tensor(self.get_from_offsets(__snake_case , """start_time""" ) )
__magic_name__: Optional[Any] = torch.tensor(self.get_from_offsets(__snake_case , """end_time""" ) )
# fmt: off
__magic_name__: Tuple = torch.tensor([1.4199, 1.6599, 2.2599, 3.0, 3.24, 3.5999, 3.7999, 4.0999, 4.26, 4.94, 5.28, 5.6599, 5.78, 5.94, 6.32, 6.5399, 6.6599] )
__magic_name__: int = torch.tensor([1.5399, 1.8999, 2.9, 3.16, 3.5399, 3.72, 4.0199, 4.1799, 4.76, 5.1599, 5.5599, 5.6999, 5.86, 6.1999, 6.38, 6.6199, 6.94] )
# fmt: on
self.assertTrue(torch.allclose(__snake_case , __snake_case , atol=0.01 ) )
self.assertTrue(torch.allclose(__snake_case , __snake_case , atol=0.01 ) )
| 96
| 1
|
"""simple docstring"""
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, logging
if is_torch_available():
import torch
__lowercase : Union[str, Any] = logging.get_logger(__name__)
class _A ( _UpperCAmelCase ):
"""simple docstring"""
UpperCamelCase_ : Optional[int] = ['''pixel_values''']
def __init__( self : List[Any] , A_ : bool = True , A_ : Optional[Dict[str, int]] = None , A_ : PILImageResampling = PILImageResampling.BILINEAR , A_ : bool = True , A_ : Dict[str, int] = None , A_ : bool = True , A_ : Union[int, float] = 1 / 255 , A_ : bool = True , A_ : Optional[Union[float, List[float]]] = None , A_ : Optional[Union[float, List[float]]] = None , **A_ : Tuple , ) -> None:
super().__init__(**A_ )
__snake_case = size if size is not None else {'''shortest_edge''': 256}
__snake_case = get_size_dict(A_ , default_to_square=A_ )
__snake_case = crop_size if crop_size is not None else {'''height''': 224, '''width''': 224}
__snake_case = get_size_dict(A_ , param_name='''crop_size''' )
__snake_case = do_resize
__snake_case = size
__snake_case = resample
__snake_case = do_center_crop
__snake_case = crop_size
__snake_case = do_rescale
__snake_case = rescale_factor
__snake_case = do_normalize
__snake_case = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
__snake_case = image_std if image_std is not None else IMAGENET_STANDARD_STD
def lowercase ( self : Optional[int] , A_ : np.ndarray , A_ : Dict[str, int] , A_ : PILImageResampling = PILImageResampling.BICUBIC , A_ : Optional[Union[str, ChannelDimension]] = None , **A_ : List[str] , ) -> np.ndarray:
__snake_case = get_size_dict(A_ , default_to_square=A_ )
if "shortest_edge" not in size:
raise ValueError(f"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}" )
__snake_case = get_resize_output_image_size(A_ , size=size['''shortest_edge'''] , default_to_square=A_ )
return resize(A_ , size=A_ , resample=A_ , data_format=A_ , **A_ )
def lowercase ( self : Tuple , A_ : np.ndarray , A_ : Dict[str, int] , A_ : Optional[Union[str, ChannelDimension]] = None , **A_ : Optional[int] , ) -> np.ndarray:
__snake_case = get_size_dict(A_ )
if "height" not in size or "width" not in size:
raise ValueError(f"The `size` parameter must contain the keys `height` and `width`. Got {size.keys()}" )
return center_crop(A_ , size=(size['''height'''], size['''width''']) , data_format=A_ , **A_ )
def lowercase ( self : Dict , A_ : np.ndarray , A_ : float , A_ : Optional[Union[str, ChannelDimension]] = None , **A_ : Dict ) -> np.ndarray:
return rescale(A_ , scale=A_ , data_format=A_ , **A_ )
def lowercase ( self : Optional[int] , A_ : np.ndarray , A_ : Union[float, List[float]] , A_ : Union[float, List[float]] , A_ : Optional[Union[str, ChannelDimension]] = None , **A_ : Optional[int] , ) -> np.ndarray:
return normalize(A_ , mean=A_ , std=A_ , data_format=A_ , **A_ )
def lowercase ( self : Any , A_ : ImageInput , A_ : Optional[bool] = None , A_ : Dict[str, int] = None , A_ : PILImageResampling = None , A_ : bool = None , A_ : Dict[str, int] = None , A_ : Optional[bool] = None , A_ : Optional[float] = None , A_ : Optional[bool] = None , A_ : Optional[Union[float, List[float]]] = None , A_ : Optional[Union[float, List[float]]] = None , A_ : Optional[Union[str, TensorType]] = None , A_ : Union[str, ChannelDimension] = ChannelDimension.FIRST , **A_ : Dict , ) -> Union[str, Any]:
__snake_case = do_resize if do_resize is not None else self.do_resize
__snake_case = size if size is not None else self.size
__snake_case = get_size_dict(A_ , default_to_square=A_ )
__snake_case = resample if resample is not None else self.resample
__snake_case = do_center_crop if do_center_crop is not None else self.do_center_crop
__snake_case = crop_size if crop_size is not None else self.crop_size
__snake_case = get_size_dict(A_ , param_name='''crop_size''' )
__snake_case = do_rescale if do_rescale is not None else self.do_rescale
__snake_case = rescale_factor if rescale_factor is not None else self.rescale_factor
__snake_case = do_normalize if do_normalize is not None else self.do_normalize
__snake_case = image_mean if image_mean is not None else self.image_mean
__snake_case = image_std if image_std is not None else self.image_std
__snake_case = make_list_of_images(A_ )
if not valid_images(A_ ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
if do_resize and size is None:
raise ValueError('''Size must be specified if do_resize is True.''' )
if do_center_crop and crop_size is None:
raise ValueError('''Crop size must be specified if do_center_crop is True.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('''Image mean and std must be specified if do_normalize is True.''' )
# All transformations expect numpy arrays.
__snake_case = [to_numpy_array(A_ ) for image in images]
if do_resize:
__snake_case = [self.resize(image=A_ , size=A_ , resample=A_ ) for image in images]
if do_center_crop:
__snake_case = [self.center_crop(image=A_ , size=A_ ) for image in images]
if do_rescale:
__snake_case = [self.rescale(image=A_ , scale=A_ ) for image in images]
if do_normalize:
__snake_case = [self.normalize(image=A_ , mean=A_ , std=A_ ) for image in images]
__snake_case = [to_channel_dimension_format(A_ , A_ ) for image in images]
__snake_case = {'''pixel_values''': images}
return BatchFeature(data=A_ , tensor_type=A_ )
def lowercase ( self : int , A_ : Dict , A_ : List[Tuple] = None ) -> Any:
__snake_case = outputs.logits
# Resize logits and compute semantic segmentation maps
if target_sizes is not None:
if len(A_ ) != len(A_ ):
raise ValueError(
'''Make sure that you pass in as many target sizes as the batch dimension of the logits''' )
if is_torch_tensor(A_ ):
__snake_case = target_sizes.numpy()
__snake_case = []
for idx in range(len(A_ ) ):
__snake_case = torch.nn.functional.interpolate(
logits[idx].unsqueeze(dim=0 ) , size=target_sizes[idx] , mode='''bilinear''' , align_corners=A_ )
__snake_case = resized_logits[0].argmax(dim=0 )
semantic_segmentation.append(A_ )
else:
__snake_case = logits.argmax(dim=1 )
__snake_case = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )]
return semantic_segmentation
| 93
|
"""simple docstring"""
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot import BlenderbotTokenizer
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
__lowercase : int = logging.get_logger(__name__)
__lowercase : Tuple = {
"vocab_file": "vocab.json",
"merges_file": "merges.txt",
"tokenizer_config_file": "tokenizer_config.json",
}
__lowercase : List[Any] = {
"vocab_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json"},
"merges_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt"},
"tokenizer_config_file": {
"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json"
},
}
__lowercase : List[str] = {"facebook/blenderbot-3B": 128}
class _A ( _UpperCAmelCase ):
"""simple docstring"""
UpperCamelCase_ : Optional[Any] = VOCAB_FILES_NAMES
UpperCamelCase_ : int = PRETRAINED_VOCAB_FILES_MAP
UpperCamelCase_ : Tuple = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCamelCase_ : int = ['''input_ids''', '''attention_mask''']
UpperCamelCase_ : str = BlenderbotTokenizer
def __init__( self : Union[str, Any] , A_ : Any=None , A_ : Optional[int]=None , A_ : Optional[Any]=None , A_ : Union[str, Any]="replace" , A_ : Union[str, Any]="<s>" , A_ : Union[str, Any]="</s>" , A_ : Optional[int]="</s>" , A_ : List[Any]="<s>" , A_ : Union[str, Any]="<unk>" , A_ : Any="<pad>" , A_ : List[str]="<mask>" , A_ : int=False , A_ : Tuple=True , **A_ : Optional[Any] , ) -> int:
super().__init__(
A_ , A_ , tokenizer_file=A_ , errors=A_ , bos_token=A_ , eos_token=A_ , sep_token=A_ , cls_token=A_ , unk_token=A_ , pad_token=A_ , mask_token=A_ , add_prefix_space=A_ , trim_offsets=A_ , **A_ , )
__snake_case = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get('''add_prefix_space''' , A_ ) != add_prefix_space:
__snake_case = getattr(A_ , pre_tok_state.pop('''type''' ) )
__snake_case = add_prefix_space
__snake_case = pre_tok_class(**A_ )
__snake_case = add_prefix_space
__snake_case = '''post_processor'''
__snake_case = getattr(self.backend_tokenizer , A_ , A_ )
if tokenizer_component_instance:
__snake_case = json.loads(tokenizer_component_instance.__getstate__() )
# The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
if "sep" in state:
__snake_case = tuple(state['''sep'''] )
if "cls" in state:
__snake_case = tuple(state['''cls'''] )
__snake_case = False
if state.get('''add_prefix_space''' , A_ ) != add_prefix_space:
__snake_case = add_prefix_space
__snake_case = True
if state.get('''trim_offsets''' , A_ ) != trim_offsets:
__snake_case = trim_offsets
__snake_case = True
if changes_to_apply:
__snake_case = getattr(A_ , state.pop('''type''' ) )
__snake_case = component_class(**A_ )
setattr(self.backend_tokenizer , A_ , A_ )
@property
# Copied from transformers.models.roberta.tokenization_roberta_fast.RobertaTokenizerFast.mask_token with Roberta->Blenderbot, RoBERTa->Blenderbot
def lowercase ( self : List[Any] ) -> str:
if self._mask_token is None:
if self.verbose:
logger.error('''Using mask_token, but it is not set yet.''' )
return None
return str(self._mask_token )
@mask_token.setter
def lowercase ( self : int , A_ : Union[str, Any] ) -> List[Any]:
__snake_case = AddedToken(A_ , lstrip=A_ , rstrip=A_ ) if isinstance(A_ , A_ ) else value
__snake_case = value
def lowercase ( self : Any , *A_ : List[str] , **A_ : Optional[int] ) -> BatchEncoding:
__snake_case = kwargs.get('''is_split_into_words''' , A_ )
assert self.add_prefix_space or not is_split_into_words, (
f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
"to use it with pretokenized inputs."
)
return super()._batch_encode_plus(*A_ , **A_ )
def lowercase ( self : str , *A_ : Tuple , **A_ : str ) -> BatchEncoding:
__snake_case = kwargs.get('''is_split_into_words''' , A_ )
assert self.add_prefix_space or not is_split_into_words, (
f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
"to use it with pretokenized inputs."
)
return super()._encode_plus(*A_ , **A_ )
def lowercase ( self : str , A_ : str , A_ : Optional[str] = None ) -> Tuple[str]:
__snake_case = self._tokenizer.model.save(A_ , name=A_ )
return tuple(A_ )
def lowercase ( self : Tuple , A_ : List[int] , A_ : Optional[List[int]] = None ) -> List[int]:
__snake_case = [self.sep_token_id]
__snake_case = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def lowercase ( self : Tuple , A_ : List[int] , A_ : Optional[List[int]] = None ) -> Any:
return token_ids_a + [self.eos_token_id]
def lowercase ( self : Optional[Any] , A_ : "Conversation" ) -> List[int]:
__snake_case = []
for is_user, text in conversation.iter_texts():
if is_user:
# We need to space prefix as it's being done within blenderbot
inputs.append(''' ''' + text )
else:
# Generated responses should contain them already.
inputs.append(A_ )
__snake_case = ''' '''.join(A_ )
__snake_case = self.encode(A_ )
if len(A_ ) > self.model_max_length:
__snake_case = input_ids[-self.model_max_length :]
logger.warning(f"Trimmed input from conversation as it was longer than {self.model_max_length} tokens." )
return input_ids
| 93
| 1
|
from argparse import ArgumentParser, Namespace
from typing import Any, List, Optional
from ..pipelines import Pipeline, get_supported_tasks, pipeline
from ..utils import logging
from . import BaseTransformersCLICommand
try:
from fastapi import Body, FastAPI, HTTPException
from fastapi.routing import APIRoute
from pydantic import BaseModel
from starlette.responses import JSONResponse
from uvicorn import run
UpperCamelCase_ = True
except (ImportError, AttributeError):
UpperCamelCase_ = object
def _UpperCAmelCase ( *A , **A ):
'''simple docstring'''
pass
UpperCamelCase_ = False
UpperCamelCase_ = logging.get_logger('transformers-cli/serving')
def _UpperCAmelCase ( A ):
'''simple docstring'''
UpperCAmelCase__ =pipeline(
task=args.task , model=args.model if args.model else None , config=args.config , tokenizer=args.tokenizer , device=args.device , )
return ServeCommand(A , args.host , args.port , args.workers )
class snake_case_ ( a ):
'''simple docstring'''
__UpperCamelCase = 42
class snake_case_ ( a ):
'''simple docstring'''
__UpperCamelCase = 42
__UpperCamelCase = 42
class snake_case_ ( a ):
'''simple docstring'''
__UpperCamelCase = 42
class snake_case_ ( a ):
'''simple docstring'''
__UpperCamelCase = 42
class snake_case_ ( a ):
'''simple docstring'''
@staticmethod
def __UpperCAmelCase ( A_ ) -> Any:
UpperCAmelCase__ =parser.add_parser(
"serve", help="CLI tool to run inference requests through REST and GraphQL endpoints." )
serve_parser.add_argument(
"--task", type=A_, choices=get_supported_tasks(), help="The task to run the pipeline on", )
serve_parser.add_argument("--host", type=A_, default="localhost", help="Interface the server will listen on." )
serve_parser.add_argument("--port", type=A_, default=8888, help="Port the serving will listen to." )
serve_parser.add_argument("--workers", type=A_, default=1, help="Number of http workers" )
serve_parser.add_argument("--model", type=A_, help="Model's name or path to stored model." )
serve_parser.add_argument("--config", type=A_, help="Model's config name or path to stored model." )
serve_parser.add_argument("--tokenizer", type=A_, help="Tokenizer name to use." )
serve_parser.add_argument(
"--device", type=A_, default=-1, help="Indicate the device to run onto, -1 indicates CPU, >= 0 indicates GPU (default: -1)", )
serve_parser.set_defaults(func=A_ )
def __init__( self, A_, A_, A_, A_ ) -> str:
UpperCAmelCase__ =pipeline
UpperCAmelCase__ =host
UpperCAmelCase__ =port
UpperCAmelCase__ =workers
if not _serve_dependencies_installed:
raise RuntimeError(
"Using serve command requires FastAPI and uvicorn. "
"Please install transformers with [serving]: pip install \"transformers[serving]\"."
"Or install FastAPI and uvicorn separately." )
else:
logger.info(f"""Serving model over {host}:{port}""" )
UpperCAmelCase__ =FastAPI(
routes=[
APIRoute(
"/", self.model_info, response_model=A_, response_class=A_, methods=["GET"], ),
APIRoute(
"/tokenize", self.tokenize, response_model=A_, response_class=A_, methods=["POST"], ),
APIRoute(
"/detokenize", self.detokenize, response_model=A_, response_class=A_, methods=["POST"], ),
APIRoute(
"/forward", self.forward, response_model=A_, response_class=A_, methods=["POST"], ),
], timeout=600, )
def __UpperCAmelCase ( self ) -> Union[str, Any]:
run(self._app, host=self.host, port=self.port, workers=self.workers )
def __UpperCAmelCase ( self ) -> List[Any]:
return ServeModelInfoResult(infos=vars(self._pipeline.model.config ) )
def __UpperCAmelCase ( self, A_ = Body(A_, embed=A_ ), A_ = Body(A_, embed=A_ ) ) -> Any:
try:
UpperCAmelCase__ =self._pipeline.tokenizer.tokenize(A_ )
if return_ids:
UpperCAmelCase__ =self._pipeline.tokenizer.convert_tokens_to_ids(A_ )
return ServeTokenizeResult(tokens=A_, tokens_ids=A_ )
else:
return ServeTokenizeResult(tokens=A_ )
except Exception as e:
raise HTTPException(status_code=500, detail={"model": "", "error": str(A_ )} )
def __UpperCAmelCase ( self, A_ = Body(A_, embed=A_ ), A_ = Body(A_, embed=A_ ), A_ = Body(A_, embed=A_ ), ) -> Optional[int]:
try:
UpperCAmelCase__ =self._pipeline.tokenizer.decode(A_, A_, A_ )
return ServeDeTokenizeResult(model="", text=A_ )
except Exception as e:
raise HTTPException(status_code=500, detail={"model": "", "error": str(A_ )} )
async def __UpperCAmelCase ( self, A_=Body(A_, embed=A_ ) ) -> int:
# Check we don't have empty string
if len(A_ ) == 0:
return ServeForwardResult(output=[], attention=[] )
try:
# Forward through the model
UpperCAmelCase__ =self._pipeline(A_ )
return ServeForwardResult(output=A_ )
except Exception as e:
raise HTTPException(500, {"error": str(A_ )} )
| 625
|
from __future__ import annotations
import requests
UpperCamelCase_ = set(
'approved_at_utc approved_by author_flair_background_color\nauthor_flair_css_class author_flair_richtext author_flair_template_id author_fullname\nauthor_premium can_mod_post category clicked content_categories created_utc downs\nedited gilded gildings hidden hide_score is_created_from_ads_ui is_meta\nis_original_content is_reddit_media_domain is_video link_flair_css_class\nlink_flair_richtext link_flair_text link_flair_text_color media_embed mod_reason_title\nname permalink pwls quarantine saved score secure_media secure_media_embed selftext\nsubreddit subreddit_name_prefixed subreddit_type thumbnail title top_awarded_type\ntotal_awards_received ups upvote_ratio url user_reports'.split()
)
def _UpperCAmelCase ( A , A = 1 , A = "new" , A = None ):
'''simple docstring'''
UpperCAmelCase__ =wanted_data or []
if invalid_search_terms := ", ".join(sorted(set(A ) - valid_terms ) ):
UpperCAmelCase__ =F"""Invalid search term: {invalid_search_terms}"""
raise ValueError(A )
UpperCAmelCase__ =requests.get(
F"""https://reddit.com/r/{subreddit}/{age}.json?limit={limit}""" , headers={"User-agent": "A random string"} , )
if response.status_code == 429:
raise requests.HTTPError
UpperCAmelCase__ =response.json()
if not wanted_data:
return {id_: data["data"]["children"][id_] for id_ in range(A )}
UpperCAmelCase__ ={}
for id_ in range(A ):
UpperCAmelCase__ ={
item: data["data"]["children"][id_]["data"][item] for item in wanted_data
}
return data_dict
if __name__ == "__main__":
# If you get Error 429, that means you are rate limited.Try after some time
print(get_subreddit_data('learnpython', wanted_data=['title', 'url', 'selftext']))
| 625
| 1
|
import json
import os
from pathlib import Path
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple, Union
import sentencepiece
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
_lowerCamelCase = logging.get_logger(__name__)
_lowerCamelCase = '''▁'''
_lowerCamelCase = {
'''vocab_file''': '''vocab.json''',
'''spm_file''': '''sentencepiece.bpe.model''',
}
_lowerCamelCase = {
'''vocab_file''': {
'''facebook/s2t-small-librispeech-asr''': (
'''https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/vocab.json'''
),
},
'''spm_file''': {
'''facebook/s2t-small-librispeech-asr''': (
'''https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/sentencepiece.bpe.model'''
)
},
}
_lowerCamelCase = {
'''facebook/s2t-small-librispeech-asr''': 1024,
}
_lowerCamelCase = ['''pt''', '''fr''', '''ru''', '''nl''', '''ro''', '''it''', '''es''', '''de''']
_lowerCamelCase = {'''mustc''': MUSTC_LANGS}
class _SCREAMING_SNAKE_CASE (a__ ):
lowerCAmelCase = VOCAB_FILES_NAMES
lowerCAmelCase = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase = MAX_MODEL_INPUT_SIZES
lowerCAmelCase = ["""input_ids""", """attention_mask"""]
lowerCAmelCase = []
def __init__( self : Optional[int] , UpperCamelCase : int , UpperCamelCase : Optional[Any] , UpperCamelCase : Optional[int]="<s>" , UpperCamelCase : List[Any]="</s>" , UpperCamelCase : Any="<pad>" , UpperCamelCase : Tuple="<unk>" , UpperCamelCase : List[str]=False , UpperCamelCase : Tuple=False , UpperCamelCase : int=None , UpperCamelCase : List[Any]=None , UpperCamelCase : Optional[Any] = None , **UpperCamelCase : Union[str, Any] , )->None:
__SCREAMING_SNAKE_CASE : str = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=lowerCAmelCase__ , eos_token=lowerCAmelCase__ , unk_token=lowerCAmelCase__ , pad_token=lowerCAmelCase__ , do_upper_case=lowerCAmelCase__ , do_lower_case=lowerCAmelCase__ , tgt_lang=lowerCAmelCase__ , lang_codes=lowerCAmelCase__ , sp_model_kwargs=self.sp_model_kwargs , **lowerCAmelCase__ , )
__SCREAMING_SNAKE_CASE : Tuple = do_upper_case
__SCREAMING_SNAKE_CASE : List[Any] = do_lower_case
__SCREAMING_SNAKE_CASE : List[str] = load_json(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : str = {v: k for k, v in self.encoder.items()}
__SCREAMING_SNAKE_CASE : Tuple = spm_file
__SCREAMING_SNAKE_CASE : int = load_spm(lowerCAmelCase__ , self.sp_model_kwargs )
if lang_codes is not None:
__SCREAMING_SNAKE_CASE : List[Any] = lang_codes
__SCREAMING_SNAKE_CASE : Any = LANGUAGES[lang_codes]
__SCREAMING_SNAKE_CASE : Tuple = [F"""<lang:{lang}>""" for lang in self.langs]
__SCREAMING_SNAKE_CASE : Tuple = {lang: self.sp_model.PieceToId(F"""<lang:{lang}>""" ) for lang in self.langs}
__SCREAMING_SNAKE_CASE : Dict = self.lang_tokens
__SCREAMING_SNAKE_CASE : Union[str, Any] = tgt_lang if tgt_lang is not None else self.langs[0]
self.set_tgt_lang_special_tokens(self._tgt_lang )
else:
__SCREAMING_SNAKE_CASE : Tuple = {}
@property
def __snake_case ( self : Union[str, Any] )->int:
return len(self.encoder )
@property
def __snake_case ( self : List[str] )->str:
return self._tgt_lang
@tgt_lang.setter
def __snake_case ( self : str , UpperCamelCase : int )->None:
__SCREAMING_SNAKE_CASE : List[str] = new_tgt_lang
self.set_tgt_lang_special_tokens(lowerCAmelCase__ )
def __snake_case ( self : Optional[Any] , UpperCamelCase : List[str] )->None:
__SCREAMING_SNAKE_CASE : int = self.lang_code_to_id[tgt_lang]
__SCREAMING_SNAKE_CASE : Union[str, Any] = [lang_code_id]
def __snake_case ( self : Any , UpperCamelCase : Optional[int] )->List[str]:
return self.sp_model.encode(lowerCAmelCase__ , out_type=lowerCAmelCase__ )
def __snake_case ( self : Dict , UpperCamelCase : List[Any] )->Any:
return self.encoder.get(lowerCAmelCase__ , self.encoder[self.unk_token] )
def __snake_case ( self : List[Any] , UpperCamelCase : Tuple )->str:
return self.decoder.get(lowerCAmelCase__ , self.unk_token )
def __snake_case ( self : Optional[int] , UpperCamelCase : str )->str:
__SCREAMING_SNAKE_CASE : int = []
__SCREAMING_SNAKE_CASE : Optional[int] = ""
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
__SCREAMING_SNAKE_CASE : Optional[Any] = self.sp_model.decode(lowerCAmelCase__ )
out_string += (decoded.upper() if self.do_upper_case else decoded) + token + " "
__SCREAMING_SNAKE_CASE : Any = []
else:
current_sub_tokens.append(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Optional[int] = self.sp_model.decode(lowerCAmelCase__ )
out_string += decoded.upper() if self.do_upper_case else decoded
return out_string.strip()
def __snake_case ( self : Dict , UpperCamelCase : List[Any] , UpperCamelCase : Union[str, Any]=None )->List[int]:
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + [self.eos_token_id]
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + [self.eos_token_id]
def __snake_case ( self : int , UpperCamelCase : Any , UpperCamelCase : List[Any] = None , UpperCamelCase : Any = False )->List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowerCAmelCase__ , token_ids_a=lowerCAmelCase__ , already_has_special_tokens=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : int = [1] * len(self.prefix_tokens )
__SCREAMING_SNAKE_CASE : Tuple = [1]
if token_ids_a is None:
return prefix_ones + ([0] * len(lowerCAmelCase__ )) + suffix_ones
return prefix_ones + ([0] * len(lowerCAmelCase__ )) + ([0] * len(lowerCAmelCase__ )) + suffix_ones
def __snake_case ( self : List[str] )->Dict:
__SCREAMING_SNAKE_CASE : Optional[Any] = self.encoder.copy()
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self : Any )->Dict:
__SCREAMING_SNAKE_CASE : Optional[Any] = self.__dict__.copy()
__SCREAMING_SNAKE_CASE : Dict = None
return state
def __setstate__( self : Tuple , UpperCamelCase : Dict )->None:
__SCREAMING_SNAKE_CASE : int = d
# for backward compatibility
if not hasattr(self , "sp_model_kwargs" ):
__SCREAMING_SNAKE_CASE : Optional[int] = {}
__SCREAMING_SNAKE_CASE : Union[str, Any] = load_spm(self.spm_file , self.sp_model_kwargs )
def __snake_case ( self : Any , UpperCamelCase : List[str] , UpperCamelCase : Tuple = None )->Tuple[str]:
__SCREAMING_SNAKE_CASE : str = Path(lowerCAmelCase__ )
assert save_dir.is_dir(), F"""{save_directory} should be a directory"""
__SCREAMING_SNAKE_CASE : Any = save_dir / (
(filename_prefix + "-" if filename_prefix else "") + self.vocab_files_names["vocab_file"]
)
__SCREAMING_SNAKE_CASE : Tuple = save_dir / (
(filename_prefix + "-" if filename_prefix else "") + self.vocab_files_names["spm_file"]
)
save_json(self.encoder , lowerCAmelCase__ )
if os.path.abspath(self.spm_file ) != os.path.abspath(lowerCAmelCase__ ) and os.path.isfile(self.spm_file ):
copyfile(self.spm_file , lowerCAmelCase__ )
elif not os.path.isfile(self.spm_file ):
with open(lowerCAmelCase__ , "wb" ) as fi:
__SCREAMING_SNAKE_CASE : Optional[int] = self.sp_model.serialized_model_proto()
fi.write(lowerCAmelCase__ )
return (str(lowerCAmelCase__ ), str(lowerCAmelCase__ ))
def _lowerCAmelCase ( __lowerCamelCase : str , __lowerCamelCase : Dict[str, Any] ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Optional[int] = sentencepiece.SentencePieceProcessor(**_lowercase )
spm.Load(str(_lowercase ) )
return spm
def _lowerCAmelCase ( __lowerCamelCase : str ):
"""simple docstring"""
with open(_lowercase , "r" ) as f:
return json.load(_lowercase )
def _lowerCAmelCase ( __lowerCamelCase : Dict , __lowerCamelCase : str ):
"""simple docstring"""
with open(_lowercase , "w" ) as f:
json.dump(_lowercase , _lowercase , indent=2 )
| 709
|
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
from ...utils.constants import SAGEMAKER_PARALLEL_EC2_INSTANCES, TORCH_DYNAMO_MODES
from ...utils.dataclasses import ComputeEnvironment, SageMakerDistributedType
from ...utils.imports import is_botoa_available
from .config_args import SageMakerConfig
from .config_utils import (
DYNAMO_BACKENDS,
_ask_field,
_ask_options,
_convert_dynamo_backend,
_convert_mixed_precision,
_convert_sagemaker_distributed_mode,
_convert_yes_no_to_bool,
)
if is_botoa_available():
import botoa # noqa: F401
def _lowerCAmelCase ( __lowerCamelCase : Optional[int] ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Optional[int] = botoa.client("iam" )
__SCREAMING_SNAKE_CASE : List[Any] = {
"Version": "2012-10-17",
"Statement": [
{"Effect": "Allow", "Principal": {"Service": "sagemaker.amazonaws.com"}, "Action": "sts:AssumeRole"}
],
}
try:
# create the role, associated with the chosen trust policy
iam_client.create_role(
RoleName=__lowerCamelCase , AssumeRolePolicyDocument=json.dumps(__lowerCamelCase , indent=2 ) )
__SCREAMING_SNAKE_CASE : str = {
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Action": [
"sagemaker:*",
"ecr:GetDownloadUrlForLayer",
"ecr:BatchGetImage",
"ecr:BatchCheckLayerAvailability",
"ecr:GetAuthorizationToken",
"cloudwatch:PutMetricData",
"cloudwatch:GetMetricData",
"cloudwatch:GetMetricStatistics",
"cloudwatch:ListMetrics",
"logs:CreateLogGroup",
"logs:CreateLogStream",
"logs:DescribeLogStreams",
"logs:PutLogEvents",
"logs:GetLogEvents",
"s3:CreateBucket",
"s3:ListBucket",
"s3:GetBucketLocation",
"s3:GetObject",
"s3:PutObject",
],
"Resource": "*",
}
],
}
# attach policy to role
iam_client.put_role_policy(
RoleName=__lowerCamelCase , PolicyName=F"""{role_name}_policy_permission""" , PolicyDocument=json.dumps(__lowerCamelCase , indent=2 ) , )
except iam_client.exceptions.EntityAlreadyExistsException:
print(F"""role {role_name} already exists. Using existing one""" )
def _lowerCAmelCase ( __lowerCamelCase : Dict ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : List[str] = botoa.client("iam" )
return iam_client.get_role(RoleName=__lowerCamelCase )["Role"]["Arn"]
def _lowerCAmelCase ( ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Any = _ask_options(
"How do you want to authorize?" , ["AWS Profile", "Credentials (AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY) "] , __lowerCamelCase , )
__SCREAMING_SNAKE_CASE : str = None
if credentials_configuration == 0:
__SCREAMING_SNAKE_CASE : List[str] = _ask_field("Enter your AWS Profile name: [default] " , default="default" )
__SCREAMING_SNAKE_CASE : Any = aws_profile
else:
print(
"Note you will need to provide AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY when you launch you training script with,"
"`accelerate launch --aws_access_key_id XXX --aws_secret_access_key YYY`" )
__SCREAMING_SNAKE_CASE : Dict = _ask_field("AWS Access Key ID: " )
__SCREAMING_SNAKE_CASE : Union[str, Any] = aws_access_key_id
__SCREAMING_SNAKE_CASE : Union[str, Any] = _ask_field("AWS Secret Access Key: " )
__SCREAMING_SNAKE_CASE : Optional[int] = aws_secret_access_key
__SCREAMING_SNAKE_CASE : List[Any] = _ask_field("Enter your AWS Region: [us-east-1]" , default="us-east-1" )
__SCREAMING_SNAKE_CASE : Any = aws_region
__SCREAMING_SNAKE_CASE : int = _ask_options(
"Do you already have an IAM Role for executing Amazon SageMaker Training Jobs?" , ["Provide IAM Role name", "Create new IAM role using credentials"] , __lowerCamelCase , )
if role_management == 0:
__SCREAMING_SNAKE_CASE : List[str] = _ask_field("Enter your IAM role name: " )
else:
__SCREAMING_SNAKE_CASE : Any = "accelerate_sagemaker_execution_role"
print(F"""Accelerate will create an iam role \"{iam_role_name}\" using the provided credentials""" )
_create_iam_role_for_sagemaker(__lowerCamelCase )
__SCREAMING_SNAKE_CASE : Any = _ask_field(
"Do you want to use custom Docker image? [yes/NO]: " , _convert_yes_no_to_bool , default=__lowerCamelCase , error_message="Please enter yes or no." , )
__SCREAMING_SNAKE_CASE : Tuple = None
if is_custom_docker_image:
__SCREAMING_SNAKE_CASE : List[Any] = _ask_field("Enter your Docker image: " , lambda __lowerCamelCase : str(__lowerCamelCase ).lower() )
__SCREAMING_SNAKE_CASE : Dict = _ask_field(
"Do you want to provide SageMaker input channels with data locations? [yes/NO]: " , _convert_yes_no_to_bool , default=__lowerCamelCase , error_message="Please enter yes or no." , )
__SCREAMING_SNAKE_CASE : List[Any] = None
if is_sagemaker_inputs_enabled:
__SCREAMING_SNAKE_CASE : List[str] = _ask_field(
"Enter the path to the SageMaker inputs TSV file with columns (channel_name, data_location): " , lambda __lowerCamelCase : str(__lowerCamelCase ).lower() , )
__SCREAMING_SNAKE_CASE : Dict = _ask_field(
"Do you want to enable SageMaker metrics? [yes/NO]: " , _convert_yes_no_to_bool , default=__lowerCamelCase , error_message="Please enter yes or no." , )
__SCREAMING_SNAKE_CASE : int = None
if is_sagemaker_metrics_enabled:
__SCREAMING_SNAKE_CASE : Dict = _ask_field(
"Enter the path to the SageMaker metrics TSV file with columns (metric_name, metric_regex): " , lambda __lowerCamelCase : str(__lowerCamelCase ).lower() , )
__SCREAMING_SNAKE_CASE : Any = _ask_options(
"What is the distributed mode?" , ["No distributed training", "Data parallelism"] , _convert_sagemaker_distributed_mode , )
__SCREAMING_SNAKE_CASE : Tuple = {}
__SCREAMING_SNAKE_CASE : str = _ask_field(
"Do you wish to optimize your script with torch dynamo?[yes/NO]:" , _convert_yes_no_to_bool , default=__lowerCamelCase , error_message="Please enter yes or no." , )
if use_dynamo:
__SCREAMING_SNAKE_CASE : Optional[Any] = "dynamo_"
__SCREAMING_SNAKE_CASE : Optional[Any] = _ask_options(
"Which dynamo backend would you like to use?" , [x.lower() for x in DYNAMO_BACKENDS] , _convert_dynamo_backend , default=2 , )
__SCREAMING_SNAKE_CASE : Union[str, Any] = _ask_field(
"Do you want to customize the defaults sent to torch.compile? [yes/NO]: " , _convert_yes_no_to_bool , default=__lowerCamelCase , error_message="Please enter yes or no." , )
if use_custom_options:
__SCREAMING_SNAKE_CASE : Union[str, Any] = _ask_options(
"Which mode do you want to use?" , __lowerCamelCase , lambda __lowerCamelCase : TORCH_DYNAMO_MODES[int(__lowerCamelCase )] , default="default" , )
__SCREAMING_SNAKE_CASE : str = _ask_field(
"Do you want the fullgraph mode or it is ok to break model into several subgraphs? [yes/NO]: " , _convert_yes_no_to_bool , default=__lowerCamelCase , error_message="Please enter yes or no." , )
__SCREAMING_SNAKE_CASE : Any = _ask_field(
"Do you want to enable dynamic shape tracing? [yes/NO]: " , _convert_yes_no_to_bool , default=__lowerCamelCase , error_message="Please enter yes or no." , )
__SCREAMING_SNAKE_CASE : Optional[int] = "Which EC2 instance type you want to use for your training?"
if distributed_type != SageMakerDistributedType.NO:
__SCREAMING_SNAKE_CASE : List[Any] = _ask_options(
__lowerCamelCase , __lowerCamelCase , lambda __lowerCamelCase : SAGEMAKER_PARALLEL_EC2_INSTANCES[int(__lowerCamelCase )] )
else:
eca_instance_query += "? [ml.p3.2xlarge]:"
__SCREAMING_SNAKE_CASE : Union[str, Any] = _ask_field(__lowerCamelCase , lambda __lowerCamelCase : str(__lowerCamelCase ).lower() , default="ml.p3.2xlarge" )
__SCREAMING_SNAKE_CASE : List[Any] = 1
if distributed_type in (SageMakerDistributedType.DATA_PARALLEL, SageMakerDistributedType.MODEL_PARALLEL):
__SCREAMING_SNAKE_CASE : Any = _ask_field(
"How many machines do you want use? [1]: " , __lowerCamelCase , default=1 , )
__SCREAMING_SNAKE_CASE : Union[str, Any] = _ask_options(
"Do you wish to use FP16 or BF16 (mixed precision)?" , ["no", "fp16", "bf16", "fp8"] , _convert_mixed_precision , )
if use_dynamo and mixed_precision == "no":
print(
"Torch dynamo used without mixed precision requires TF32 to be efficient. Accelerate will enable it by default when launching your scripts." )
return SageMakerConfig(
image_uri=__lowerCamelCase , compute_environment=ComputeEnvironment.AMAZON_SAGEMAKER , distributed_type=__lowerCamelCase , use_cpu=__lowerCamelCase , dynamo_config=__lowerCamelCase , eca_instance_type=__lowerCamelCase , profile=__lowerCamelCase , region=__lowerCamelCase , iam_role_name=__lowerCamelCase , mixed_precision=__lowerCamelCase , num_machines=__lowerCamelCase , sagemaker_inputs_file=__lowerCamelCase , sagemaker_metrics_file=__lowerCamelCase , )
| 447
| 0
|
from __future__ import annotations
UpperCamelCase = 8.9_88e9 # units = N * m^s * C^-2
def __lowerCamelCase ( __lowerCAmelCase : float , __lowerCAmelCase : float , __lowerCAmelCase : float , __lowerCAmelCase : float ) -> dict[str, float]:
__UpperCamelCase : Dict = abs(chargea * chargea )
if (force, chargea, chargea, distance).count(0 ) != 1:
raise ValueError("""One and only one argument must be 0""" )
if distance < 0:
raise ValueError("""Distance cannot be negative""" )
if force == 0:
__UpperCamelCase : List[str] = COULOMBS_CONSTANT * charge_product / (distance**2)
return {"force": force}
elif chargea == 0:
__UpperCamelCase : Optional[Any] = abs(__lowerCAmelCase ) * (distance**2) / (COULOMBS_CONSTANT * chargea)
return {"charge1": chargea}
elif chargea == 0:
__UpperCamelCase : List[Any] = abs(__lowerCAmelCase ) * (distance**2) / (COULOMBS_CONSTANT * chargea)
return {"charge2": chargea}
elif distance == 0:
__UpperCamelCase : List[Any] = (COULOMBS_CONSTANT * charge_product / abs(__lowerCAmelCase )) ** 0.5
return {"distance": distance}
raise ValueError("""Exactly one argument must be 0""" )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 269
|
from . import (
albert,
align,
altclip,
audio_spectrogram_transformer,
auto,
autoformer,
bark,
bart,
barthez,
bartpho,
beit,
bert,
bert_generation,
bert_japanese,
bertweet,
big_bird,
bigbird_pegasus,
biogpt,
bit,
blenderbot,
blenderbot_small,
blip,
blip_a,
bloom,
bridgetower,
byta,
camembert,
canine,
chinese_clip,
clap,
clip,
clipseg,
codegen,
conditional_detr,
convbert,
convnext,
convnextva,
cpm,
cpmant,
ctrl,
cvt,
dataavec,
deberta,
deberta_va,
decision_transformer,
deformable_detr,
deit,
deprecated,
deta,
detr,
dialogpt,
dinat,
distilbert,
dit,
donut,
dpr,
dpt,
efficientformer,
efficientnet,
electra,
encodec,
encoder_decoder,
ernie,
ernie_m,
esm,
falcon,
flaubert,
flava,
fnet,
focalnet,
fsmt,
funnel,
git,
glpn,
gpta,
gpt_bigcode,
gpt_neo,
gpt_neox,
gpt_neox_japanese,
gpt_swa,
gptj,
gptsan_japanese,
graphormer,
groupvit,
herbert,
hubert,
ibert,
imagegpt,
informer,
instructblip,
jukebox,
layoutlm,
layoutlmva,
layoutlmva,
layoutxlm,
led,
levit,
lilt,
llama,
longformer,
longta,
luke,
lxmert,
mam_aaa,
marian,
markuplm,
maskaformer,
maskformer,
mbart,
mbartaa,
mega,
megatron_bert,
megatron_gpta,
mgp_str,
mluke,
mobilebert,
mobilenet_va,
mobilenet_va,
mobilevit,
mobilevitva,
mpnet,
mra,
mta,
musicgen,
mvp,
nat,
nezha,
nllb,
nllb_moe,
nystromformer,
oneformer,
open_llama,
openai,
opt,
owlvit,
pegasus,
pegasus_x,
perceiver,
phobert,
pixastruct,
plbart,
poolformer,
prophetnet,
qdqbert,
rag,
realm,
reformer,
regnet,
rembert,
resnet,
roberta,
roberta_prelayernorm,
roc_bert,
roformer,
rwkv,
sam,
segformer,
sew,
sew_d,
speech_encoder_decoder,
speech_to_text,
speech_to_text_a,
speechta,
splinter,
squeezebert,
swiftformer,
swin,
swinasr,
swinva,
switch_transformers,
ta,
table_transformer,
tapas,
time_series_transformer,
timesformer,
timm_backbone,
transfo_xl,
trocr,
tvlt,
umta,
unispeech,
unispeech_sat,
upernet,
videomae,
vilt,
vision_encoder_decoder,
vision_text_dual_encoder,
visual_bert,
vit,
vit_hybrid,
vit_mae,
vit_msn,
vivit,
wavaveca,
wavaveca_conformer,
wavaveca_phoneme,
wavaveca_with_lm,
wavlm,
whisper,
x_clip,
xglm,
xlm,
xlm_prophetnet,
xlm_roberta,
xlm_roberta_xl,
xlnet,
xmod,
yolos,
yoso,
)
| 269
| 1
|
'''simple docstring'''
__UpperCamelCase : Tuple = """0.18.2"""
from .configuration_utils import ConfigMixin
from .utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_inflect_available,
is_invisible_watermark_available,
is_k_diffusion_available,
is_k_diffusion_version,
is_librosa_available,
is_note_seq_available,
is_onnx_available,
is_scipy_available,
is_torch_available,
is_torchsde_available,
is_transformers_available,
is_transformers_version,
is_unidecode_available,
logging,
)
try:
if not is_onnx_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_onnx_objects import * # noqa F403
else:
from .pipelines import OnnxRuntimeModel
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_pt_objects import * # noqa F403
else:
from .models import (
AutoencoderKL,
ControlNetModel,
ModelMixin,
PriorTransformer,
TaFilmDecoder,
TransformeraDModel,
UNetaDModel,
UNetaDConditionModel,
UNetaDModel,
UNetaDConditionModel,
VQModel,
)
from .optimization import (
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
get_scheduler,
)
from .pipelines import (
AudioPipelineOutput,
ConsistencyModelPipeline,
DanceDiffusionPipeline,
DDIMPipeline,
DDPMPipeline,
DiffusionPipeline,
DiTPipeline,
ImagePipelineOutput,
KarrasVePipeline,
LDMPipeline,
LDMSuperResolutionPipeline,
PNDMPipeline,
RePaintPipeline,
ScoreSdeVePipeline,
)
from .schedulers import (
CMStochasticIterativeScheduler,
DDIMInverseScheduler,
DDIMParallelScheduler,
DDIMScheduler,
DDPMParallelScheduler,
DDPMScheduler,
DEISMultistepScheduler,
DPMSolverMultistepInverseScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
HeunDiscreteScheduler,
IPNDMScheduler,
KarrasVeScheduler,
KDPMaAncestralDiscreteScheduler,
KDPMaDiscreteScheduler,
PNDMScheduler,
RePaintScheduler,
SchedulerMixin,
ScoreSdeVeScheduler,
UnCLIPScheduler,
UniPCMultistepScheduler,
VQDiffusionScheduler,
)
from .training_utils import EMAModel
try:
if not (is_torch_available() and is_scipy_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_scipy_objects import * # noqa F403
else:
from .schedulers import LMSDiscreteScheduler
try:
if not (is_torch_available() and is_torchsde_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_torchsde_objects import * # noqa F403
else:
from .schedulers import DPMSolverSDEScheduler
try:
if not (is_torch_available() and is_transformers_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipelines import (
AltDiffusionImgaImgPipeline,
AltDiffusionPipeline,
AudioLDMPipeline,
CycleDiffusionPipeline,
IFImgaImgPipeline,
IFImgaImgSuperResolutionPipeline,
IFInpaintingPipeline,
IFInpaintingSuperResolutionPipeline,
IFPipeline,
IFSuperResolutionPipeline,
ImageTextPipelineOutput,
KandinskyImgaImgPipeline,
KandinskyInpaintPipeline,
KandinskyPipeline,
KandinskyPriorPipeline,
KandinskyVaaControlnetImgaImgPipeline,
KandinskyVaaControlnetPipeline,
KandinskyVaaImgaImgPipeline,
KandinskyVaaInpaintPipeline,
KandinskyVaaPipeline,
KandinskyVaaPriorEmbaEmbPipeline,
KandinskyVaaPriorPipeline,
LDMTextToImagePipeline,
PaintByExamplePipeline,
SemanticStableDiffusionPipeline,
ShapEImgaImgPipeline,
ShapEPipeline,
StableDiffusionAttendAndExcitePipeline,
StableDiffusionControlNetImgaImgPipeline,
StableDiffusionControlNetInpaintPipeline,
StableDiffusionControlNetPipeline,
StableDiffusionDepthaImgPipeline,
StableDiffusionDiffEditPipeline,
StableDiffusionImageVariationPipeline,
StableDiffusionImgaImgPipeline,
StableDiffusionInpaintPipeline,
StableDiffusionInpaintPipelineLegacy,
StableDiffusionInstructPixaPixPipeline,
StableDiffusionLatentUpscalePipeline,
StableDiffusionLDMaDPipeline,
StableDiffusionModelEditingPipeline,
StableDiffusionPanoramaPipeline,
StableDiffusionParadigmsPipeline,
StableDiffusionPipeline,
StableDiffusionPipelineSafe,
StableDiffusionPixaPixZeroPipeline,
StableDiffusionSAGPipeline,
StableDiffusionUpscalePipeline,
StableUnCLIPImgaImgPipeline,
StableUnCLIPPipeline,
TextToVideoSDPipeline,
TextToVideoZeroPipeline,
UnCLIPImageVariationPipeline,
UnCLIPPipeline,
UniDiffuserModel,
UniDiffuserPipeline,
UniDiffuserTextDecoder,
VersatileDiffusionDualGuidedPipeline,
VersatileDiffusionImageVariationPipeline,
VersatileDiffusionPipeline,
VersatileDiffusionTextToImagePipeline,
VideoToVideoSDPipeline,
VQDiffusionPipeline,
)
try:
if not (is_torch_available() and is_transformers_available() and is_invisible_watermark_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_invisible_watermark_objects import * # noqa F403
else:
from .pipelines import StableDiffusionXLImgaImgPipeline, StableDiffusionXLPipeline
try:
if not (is_torch_available() and is_transformers_available() and is_k_diffusion_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_k_diffusion_objects import * # noqa F403
else:
from .pipelines import StableDiffusionKDiffusionPipeline
try:
if not (is_torch_available() and is_transformers_available() and is_onnx_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_onnx_objects import * # noqa F403
else:
from .pipelines import (
OnnxStableDiffusionImgaImgPipeline,
OnnxStableDiffusionInpaintPipeline,
OnnxStableDiffusionInpaintPipelineLegacy,
OnnxStableDiffusionPipeline,
OnnxStableDiffusionUpscalePipeline,
StableDiffusionOnnxPipeline,
)
try:
if not (is_torch_available() and is_librosa_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_librosa_objects import * # noqa F403
else:
from .pipelines import AudioDiffusionPipeline, Mel
try:
if not (is_transformers_available() and is_torch_available() and is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403
else:
from .pipelines import SpectrogramDiffusionPipeline
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_flax_objects import * # noqa F403
else:
from .models.controlnet_flax import FlaxControlNetModel
from .models.modeling_flax_utils import FlaxModelMixin
from .models.unet_ad_condition_flax import FlaxUNetaDConditionModel
from .models.vae_flax import FlaxAutoencoderKL
from .pipelines import FlaxDiffusionPipeline
from .schedulers import (
FlaxDDIMScheduler,
FlaxDDPMScheduler,
FlaxDPMSolverMultistepScheduler,
FlaxKarrasVeScheduler,
FlaxLMSDiscreteScheduler,
FlaxPNDMScheduler,
FlaxSchedulerMixin,
FlaxScoreSdeVeScheduler,
)
try:
if not (is_flax_available() and is_transformers_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_flax_and_transformers_objects import * # noqa F403
else:
from .pipelines import (
FlaxStableDiffusionControlNetPipeline,
FlaxStableDiffusionImgaImgPipeline,
FlaxStableDiffusionInpaintPipeline,
FlaxStableDiffusionPipeline,
)
try:
if not (is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_note_seq_objects import * # noqa F403
else:
from .pipelines import MidiProcessor
| 270
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_tf_available,
is_torch_available,
)
__UpperCamelCase : Optional[int] = {
"""configuration_speech_to_text""": ["""SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """Speech2TextConfig"""],
"""processing_speech_to_text""": ["""Speech2TextProcessor"""],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : Union[str, Any] = ["""Speech2TextTokenizer"""]
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : Optional[Any] = ["""Speech2TextFeatureExtractor"""]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : List[Any] = [
"""TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFSpeech2TextForConditionalGeneration""",
"""TFSpeech2TextModel""",
"""TFSpeech2TextPreTrainedModel""",
]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : Tuple = [
"""SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""Speech2TextForConditionalGeneration""",
"""Speech2TextModel""",
"""Speech2TextPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_speech_to_text import SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, SpeechaTextConfig
from .processing_speech_to_text import SpeechaTextProcessor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_speech_to_text import SpeechaTextTokenizer
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_speech_to_text import SpeechaTextFeatureExtractor
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_speech_to_text import (
TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFSpeechaTextForConditionalGeneration,
TFSpeechaTextModel,
TFSpeechaTextPreTrainedModel,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_speech_to_text import (
SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
SpeechaTextForConditionalGeneration,
SpeechaTextModel,
SpeechaTextPreTrainedModel,
)
else:
import sys
__UpperCamelCase : List[str] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 270
| 1
|
def _UpperCAmelCase ( a : int ):
if num <= 0:
raise ValueError("""Input must be a positive integer""" )
snake_case__ = [True] * (num + 1)
snake_case__ = 2
while p * p <= num:
if primes[p]:
for i in range(p * p , num + 1 , a ):
snake_case__ = False
p += 1
return [prime for prime in range(2 , num + 1 ) if primes[prime]]
if __name__ == "__main__":
import doctest
doctest.testmod()
a__ = int(input("""Enter a positive integer: """).strip())
print(prime_sieve_eratosthenes(user_num))
| 654
|
import tempfile
import torch
from diffusers import IPNDMScheduler
from .test_schedulers import SchedulerCommonTest
class _lowerCAmelCase ( lowercase_ ):
"""simple docstring"""
_lowercase : int = (IPNDMScheduler,)
_lowercase : int = (('''num_inference_steps''', 50),)
def __magic_name__ ( self : Any , **UpperCamelCase__ : Tuple):
'''simple docstring'''
snake_case__ = {"""num_train_timesteps""": 1_0_0_0}
config.update(**UpperCamelCase__)
return config
def __magic_name__ ( self : int , UpperCamelCase__ : Dict=0 , **UpperCamelCase__ : int):
'''simple docstring'''
snake_case__ = dict(self.forward_default_kwargs)
snake_case__ = kwargs.pop("""num_inference_steps""" , UpperCamelCase__)
snake_case__ = self.dummy_sample
snake_case__ = 0.1 * sample
snake_case__ = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
snake_case__ = self.get_scheduler_config(**UpperCamelCase__)
snake_case__ = scheduler_class(**UpperCamelCase__)
scheduler.set_timesteps(UpperCamelCase__)
# copy over dummy past residuals
snake_case__ = dummy_past_residuals[:]
if time_step is None:
snake_case__ = scheduler.timesteps[len(scheduler.timesteps) // 2]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(UpperCamelCase__)
snake_case__ = scheduler_class.from_pretrained(UpperCamelCase__)
new_scheduler.set_timesteps(UpperCamelCase__)
# copy over dummy past residuals
snake_case__ = dummy_past_residuals[:]
snake_case__ = scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__).prev_sample
snake_case__ = new_scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__).prev_sample
assert torch.sum(torch.abs(output - new_output)) < 1E-5, "Scheduler outputs are not identical"
snake_case__ = scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__).prev_sample
snake_case__ = new_scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__).prev_sample
assert torch.sum(torch.abs(output - new_output)) < 1E-5, "Scheduler outputs are not identical"
def __magic_name__ ( self : List[Any]):
'''simple docstring'''
pass
def __magic_name__ ( self : Tuple , UpperCamelCase__ : Union[str, Any]=0 , **UpperCamelCase__ : Tuple):
'''simple docstring'''
snake_case__ = dict(self.forward_default_kwargs)
snake_case__ = kwargs.pop("""num_inference_steps""" , UpperCamelCase__)
snake_case__ = self.dummy_sample
snake_case__ = 0.1 * sample
snake_case__ = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
snake_case__ = self.get_scheduler_config()
snake_case__ = scheduler_class(**UpperCamelCase__)
scheduler.set_timesteps(UpperCamelCase__)
# copy over dummy past residuals (must be after setting timesteps)
snake_case__ = dummy_past_residuals[:]
if time_step is None:
snake_case__ = scheduler.timesteps[len(scheduler.timesteps) // 2]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(UpperCamelCase__)
snake_case__ = scheduler_class.from_pretrained(UpperCamelCase__)
# copy over dummy past residuals
new_scheduler.set_timesteps(UpperCamelCase__)
# copy over dummy past residual (must be after setting timesteps)
snake_case__ = dummy_past_residuals[:]
snake_case__ = scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__).prev_sample
snake_case__ = new_scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__).prev_sample
assert torch.sum(torch.abs(output - new_output)) < 1E-5, "Scheduler outputs are not identical"
snake_case__ = scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__).prev_sample
snake_case__ = new_scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__).prev_sample
assert torch.sum(torch.abs(output - new_output)) < 1E-5, "Scheduler outputs are not identical"
def __magic_name__ ( self : Union[str, Any] , **UpperCamelCase__ : Dict):
'''simple docstring'''
snake_case__ = self.scheduler_classes[0]
snake_case__ = self.get_scheduler_config(**UpperCamelCase__)
snake_case__ = scheduler_class(**UpperCamelCase__)
snake_case__ = 1_0
snake_case__ = self.dummy_model()
snake_case__ = self.dummy_sample_deter
scheduler.set_timesteps(UpperCamelCase__)
for i, t in enumerate(scheduler.timesteps):
snake_case__ = model(UpperCamelCase__ , UpperCamelCase__)
snake_case__ = scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__).prev_sample
for i, t in enumerate(scheduler.timesteps):
snake_case__ = model(UpperCamelCase__ , UpperCamelCase__)
snake_case__ = scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__).prev_sample
return sample
def __magic_name__ ( self : Optional[int]):
'''simple docstring'''
snake_case__ = dict(self.forward_default_kwargs)
snake_case__ = kwargs.pop("""num_inference_steps""" , UpperCamelCase__)
for scheduler_class in self.scheduler_classes:
snake_case__ = self.get_scheduler_config()
snake_case__ = scheduler_class(**UpperCamelCase__)
snake_case__ = self.dummy_sample
snake_case__ = 0.1 * sample
if num_inference_steps is not None and hasattr(UpperCamelCase__ , """set_timesteps"""):
scheduler.set_timesteps(UpperCamelCase__)
elif num_inference_steps is not None and not hasattr(UpperCamelCase__ , """set_timesteps"""):
snake_case__ = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
snake_case__ = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
snake_case__ = dummy_past_residuals[:]
snake_case__ = scheduler.timesteps[5]
snake_case__ = scheduler.timesteps[6]
snake_case__ = scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__).prev_sample
snake_case__ = scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__).prev_sample
self.assertEqual(output_a.shape , sample.shape)
self.assertEqual(output_a.shape , output_a.shape)
snake_case__ = scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__).prev_sample
snake_case__ = scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__).prev_sample
self.assertEqual(output_a.shape , sample.shape)
self.assertEqual(output_a.shape , output_a.shape)
def __magic_name__ ( self : Union[str, Any]):
'''simple docstring'''
for timesteps in [1_0_0, 1_0_0_0]:
self.check_over_configs(num_train_timesteps=UpperCamelCase__ , time_step=UpperCamelCase__)
def __magic_name__ ( self : Dict):
'''simple docstring'''
for t, num_inference_steps in zip([1, 5, 1_0] , [1_0, 5_0, 1_0_0]):
self.check_over_forward(num_inference_steps=UpperCamelCase__ , time_step=UpperCamelCase__)
def __magic_name__ ( self : List[str]):
'''simple docstring'''
snake_case__ = self.full_loop()
snake_case__ = torch.mean(torch.abs(UpperCamelCase__))
assert abs(result_mean.item() - 2_5_4_0_5_2_9) < 1_0
| 654
| 1
|
"""simple docstring"""
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, logging
if is_torch_available():
import torch
__lowerCAmelCase : str = logging.get_logger(__name__)
class SCREAMING_SNAKE_CASE ( lowerCAmelCase ):
'''simple docstring'''
snake_case__ : List[Any] = ['pixel_values']
def __init__( self :List[Any] , __magic_name__ :bool = True , __magic_name__ :Optional[Dict[str, int]] = None , __magic_name__ :PILImageResampling = PILImageResampling.BILINEAR , __magic_name__ :bool = True , __magic_name__ :Dict[str, int] = None , __magic_name__ :bool = True , __magic_name__ :Union[int, float] = 1 / 255 , __magic_name__ :bool = True , __magic_name__ :Optional[Union[float, List[float]]] = None , __magic_name__ :Optional[Union[float, List[float]]] = None , **__magic_name__ :Optional[int] , ) -> None:
'''simple docstring'''
super().__init__(**__magic_name__ )
a__ = size if size is not None else {'''shortest_edge''': 256}
a__ = get_size_dict(__magic_name__ , default_to_square=__magic_name__ )
a__ = crop_size if crop_size is not None else {'''height''': 224, '''width''': 224}
a__ = get_size_dict(__magic_name__ , param_name='''crop_size''' )
a__ = do_resize
a__ = size
a__ = resample
a__ = do_center_crop
a__ = crop_size
a__ = do_rescale
a__ = rescale_factor
a__ = do_normalize
a__ = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
a__ = image_std if image_std is not None else IMAGENET_STANDARD_STD
def _UpperCamelCase ( self :str , __magic_name__ :np.ndarray , __magic_name__ :Dict[str, int] , __magic_name__ :PILImageResampling = PILImageResampling.BICUBIC , __magic_name__ :Optional[Union[str, ChannelDimension]] = None , **__magic_name__ :str , ) -> np.ndarray:
'''simple docstring'''
a__ = get_size_dict(__magic_name__ , default_to_square=__magic_name__ )
if "shortest_edge" not in size:
raise ValueError(F"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}" )
a__ = get_resize_output_image_size(__magic_name__ , size=size['''shortest_edge'''] , default_to_square=__magic_name__ )
return resize(__magic_name__ , size=__magic_name__ , resample=__magic_name__ , data_format=__magic_name__ , **__magic_name__ )
def _UpperCamelCase ( self :List[str] , __magic_name__ :np.ndarray , __magic_name__ :Dict[str, int] , __magic_name__ :Optional[Union[str, ChannelDimension]] = None , **__magic_name__ :Optional[Any] , ) -> np.ndarray:
'''simple docstring'''
a__ = get_size_dict(__magic_name__ )
if "height" not in size or "width" not in size:
raise ValueError(F"The `size` parameter must contain the keys `height` and `width`. Got {size.keys()}" )
return center_crop(__magic_name__ , size=(size['''height'''], size['''width''']) , data_format=__magic_name__ , **__magic_name__ )
def _UpperCamelCase ( self :Optional[int] , __magic_name__ :np.ndarray , __magic_name__ :float , __magic_name__ :Optional[Union[str, ChannelDimension]] = None , **__magic_name__ :Any ) -> np.ndarray:
'''simple docstring'''
return rescale(__magic_name__ , scale=__magic_name__ , data_format=__magic_name__ , **__magic_name__ )
def _UpperCamelCase ( self :List[str] , __magic_name__ :np.ndarray , __magic_name__ :Union[float, List[float]] , __magic_name__ :Union[float, List[float]] , __magic_name__ :Optional[Union[str, ChannelDimension]] = None , **__magic_name__ :List[str] , ) -> np.ndarray:
'''simple docstring'''
return normalize(__magic_name__ , mean=__magic_name__ , std=__magic_name__ , data_format=__magic_name__ , **__magic_name__ )
def _UpperCamelCase ( self :str , __magic_name__ :ImageInput , __magic_name__ :Optional[bool] = None , __magic_name__ :Dict[str, int] = None , __magic_name__ :PILImageResampling = None , __magic_name__ :bool = None , __magic_name__ :Dict[str, int] = None , __magic_name__ :Optional[bool] = None , __magic_name__ :Optional[float] = None , __magic_name__ :Optional[bool] = None , __magic_name__ :Optional[Union[float, List[float]]] = None , __magic_name__ :Optional[Union[float, List[float]]] = None , __magic_name__ :Optional[Union[str, TensorType]] = None , __magic_name__ :Union[str, ChannelDimension] = ChannelDimension.FIRST , **__magic_name__ :List[Any] , ) -> List[Any]:
'''simple docstring'''
a__ = do_resize if do_resize is not None else self.do_resize
a__ = size if size is not None else self.size
a__ = get_size_dict(__magic_name__ , default_to_square=__magic_name__ )
a__ = resample if resample is not None else self.resample
a__ = do_center_crop if do_center_crop is not None else self.do_center_crop
a__ = crop_size if crop_size is not None else self.crop_size
a__ = get_size_dict(__magic_name__ , param_name='''crop_size''' )
a__ = do_rescale if do_rescale is not None else self.do_rescale
a__ = rescale_factor if rescale_factor is not None else self.rescale_factor
a__ = do_normalize if do_normalize is not None else self.do_normalize
a__ = image_mean if image_mean is not None else self.image_mean
a__ = image_std if image_std is not None else self.image_std
a__ = make_list_of_images(__magic_name__ )
if not valid_images(__magic_name__ ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
if do_resize and size is None:
raise ValueError('''Size must be specified if do_resize is True.''' )
if do_center_crop and crop_size is None:
raise ValueError('''Crop size must be specified if do_center_crop is True.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('''Image mean and std must be specified if do_normalize is True.''' )
# All transformations expect numpy arrays.
a__ = [to_numpy_array(__magic_name__ ) for image in images]
if do_resize:
a__ = [self.resize(image=__magic_name__ , size=__magic_name__ , resample=__magic_name__ ) for image in images]
if do_center_crop:
a__ = [self.center_crop(image=__magic_name__ , size=__magic_name__ ) for image in images]
if do_rescale:
a__ = [self.rescale(image=__magic_name__ , scale=__magic_name__ ) for image in images]
if do_normalize:
a__ = [self.normalize(image=__magic_name__ , mean=__magic_name__ , std=__magic_name__ ) for image in images]
a__ = [to_channel_dimension_format(__magic_name__ , __magic_name__ ) for image in images]
a__ = {'''pixel_values''': images}
return BatchFeature(data=__magic_name__ , tensor_type=__magic_name__ )
def _UpperCamelCase ( self :int , __magic_name__ :int , __magic_name__ :List[Tuple] = None ) -> Any:
'''simple docstring'''
a__ = outputs.logits
# Resize logits and compute semantic segmentation maps
if target_sizes is not None:
if len(__magic_name__ ) != len(__magic_name__ ):
raise ValueError(
'''Make sure that you pass in as many target sizes as the batch dimension of the logits''' )
if is_torch_tensor(__magic_name__ ):
a__ = target_sizes.numpy()
a__ = []
for idx in range(len(__magic_name__ ) ):
a__ = torch.nn.functional.interpolate(
logits[idx].unsqueeze(dim=0 ) , size=target_sizes[idx] , mode='''bilinear''' , align_corners=__magic_name__ )
a__ = resized_logits[0].argmax(dim=0 )
semantic_segmentation.append(__magic_name__ )
else:
a__ = logits.argmax(dim=1 )
a__ = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )]
return semantic_segmentation
| 711
|
"""simple docstring"""
import math
import qiskit
def __snake_case ( UpperCamelCase = 1 , UpperCamelCase = 1 , UpperCamelCase = 1 ) -> qiskit.result.counts.Counts:
"""simple docstring"""
if (
isinstance(UpperCamelCase , UpperCamelCase )
or isinstance(UpperCamelCase , UpperCamelCase )
or isinstance(UpperCamelCase , UpperCamelCase )
):
raise TypeError('''inputs must be integers.''' )
if (input_a < 0) or (input_a < 0) or (carry_in < 0):
raise ValueError('''inputs must be positive.''' )
if (
(math.floor(UpperCamelCase ) != input_a)
or (math.floor(UpperCamelCase ) != input_a)
or (math.floor(UpperCamelCase ) != carry_in)
):
raise ValueError('''inputs must be exact integers.''' )
if (input_a > 2) or (input_a > 2) or (carry_in > 2):
raise ValueError('''inputs must be less or equal to 2.''' )
# build registers
a__ = qiskit.QuantumRegister(4 , '''qr''' )
a__ = qiskit.ClassicalRegister(2 , '''cr''' )
# list the entries
a__ = [input_a, input_a, carry_in]
a__ = qiskit.QuantumCircuit(UpperCamelCase , UpperCamelCase )
for i in range(0 , 3 ):
if entry[i] == 2:
quantum_circuit.h(UpperCamelCase ) # for hadamard entries
elif entry[i] == 1:
quantum_circuit.x(UpperCamelCase ) # for 1 entries
elif entry[i] == 0:
quantum_circuit.i(UpperCamelCase ) # for 0 entries
# build the circuit
quantum_circuit.ccx(0 , 1 , 3 ) # ccx = toffoli gate
quantum_circuit.cx(0 , 1 )
quantum_circuit.ccx(1 , 2 , 3 )
quantum_circuit.cx(1 , 2 )
quantum_circuit.cx(0 , 1 )
quantum_circuit.measure([2, 3] , UpperCamelCase ) # measure the last two qbits
a__ = qiskit.Aer.get_backend('''aer_simulator''' )
a__ = qiskit.execute(UpperCamelCase , UpperCamelCase , shots=1_000 )
return job.result().get_counts(UpperCamelCase )
if __name__ == "__main__":
print(f"Total sum count for state is: {quantum_full_adder(1, 1, 1)}")
| 158
| 0
|
'''simple docstring'''
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase__ : List[str] = logging.get_logger(__name__)
UpperCamelCase__ : Any = {
"microsoft/wavlm-base": "https://huggingface.co/microsoft/wavlm-base/resolve/main/config.json",
# See all WavLM models at https://huggingface.co/models?filter=wavlm
}
class _a (lowerCAmelCase__):
"""simple docstring"""
SCREAMING_SNAKE_CASE = 'wavlm'
def __init__( self , A__=32 , A__=7_68 , A__=12 , A__=12 , A__=30_72 , A__="gelu" , A__=0.1 , A__=0.1 , A__=0.1 , A__=0.0 , A__=0.1 , A__=0.1 , A__=0.02 , A__=1E-5 , A__="group" , A__="gelu" , A__=(5_12, 5_12, 5_12, 5_12, 5_12, 5_12, 5_12) , A__=(5, 2, 2, 2, 2, 2, 2) , A__=(10, 3, 3, 3, 3, 2, 2) , A__=False , A__=1_28 , A__=16 , A__=3_20 , A__=8_00 , A__=False , A__=True , A__=0.05 , A__=10 , A__=2 , A__=0.0 , A__=10 , A__=3_20 , A__=2 , A__=0.1 , A__=1_00 , A__=2_56 , A__=2_56 , A__=0.1 , A__="mean" , A__=False , A__=False , A__=2_56 , A__=(5_12, 5_12, 5_12, 5_12, 15_00) , A__=(5, 3, 3, 1, 1) , A__=(1, 2, 3, 1, 1) , A__=5_12 , A__=80 , A__=0 , A__=1 , A__=2 , A__=False , A__=3 , A__=2 , A__=3 , A__=None , **A__ , ) -> Any:
super().__init__(**_A , pad_token_id=_A , bos_token_id=_A , eos_token_id=_A )
_SCREAMING_SNAKE_CASE = hidden_size
_SCREAMING_SNAKE_CASE = feat_extract_norm
_SCREAMING_SNAKE_CASE = feat_extract_activation
_SCREAMING_SNAKE_CASE = list(_A )
_SCREAMING_SNAKE_CASE = list(_A )
_SCREAMING_SNAKE_CASE = list(_A )
_SCREAMING_SNAKE_CASE = conv_bias
_SCREAMING_SNAKE_CASE = num_buckets
_SCREAMING_SNAKE_CASE = max_bucket_distance
_SCREAMING_SNAKE_CASE = num_conv_pos_embeddings
_SCREAMING_SNAKE_CASE = num_conv_pos_embedding_groups
_SCREAMING_SNAKE_CASE = len(self.conv_dim )
_SCREAMING_SNAKE_CASE = num_hidden_layers
_SCREAMING_SNAKE_CASE = intermediate_size
_SCREAMING_SNAKE_CASE = hidden_act
_SCREAMING_SNAKE_CASE = num_attention_heads
_SCREAMING_SNAKE_CASE = hidden_dropout
_SCREAMING_SNAKE_CASE = attention_dropout
_SCREAMING_SNAKE_CASE = activation_dropout
_SCREAMING_SNAKE_CASE = feat_proj_dropout
_SCREAMING_SNAKE_CASE = final_dropout
_SCREAMING_SNAKE_CASE = layerdrop
_SCREAMING_SNAKE_CASE = layer_norm_eps
_SCREAMING_SNAKE_CASE = initializer_range
_SCREAMING_SNAKE_CASE = num_ctc_classes
_SCREAMING_SNAKE_CASE = vocab_size
_SCREAMING_SNAKE_CASE = do_stable_layer_norm
_SCREAMING_SNAKE_CASE = use_weighted_layer_sum
_SCREAMING_SNAKE_CASE = classifier_proj_size
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
"""Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="""
""" `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="""
F" {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,"
F" `len(config.conv_kernel) = {len(self.conv_kernel )}`." )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
_SCREAMING_SNAKE_CASE = apply_spec_augment
_SCREAMING_SNAKE_CASE = mask_time_prob
_SCREAMING_SNAKE_CASE = mask_time_length
_SCREAMING_SNAKE_CASE = mask_time_min_masks
_SCREAMING_SNAKE_CASE = mask_feature_prob
_SCREAMING_SNAKE_CASE = mask_feature_length
# parameters for pretraining with codevector quantized representations
_SCREAMING_SNAKE_CASE = num_codevectors_per_group
_SCREAMING_SNAKE_CASE = num_codevector_groups
_SCREAMING_SNAKE_CASE = contrastive_logits_temperature
_SCREAMING_SNAKE_CASE = num_negatives
_SCREAMING_SNAKE_CASE = codevector_dim
_SCREAMING_SNAKE_CASE = proj_codevector_dim
_SCREAMING_SNAKE_CASE = diversity_loss_weight
# ctc loss
_SCREAMING_SNAKE_CASE = ctc_loss_reduction
_SCREAMING_SNAKE_CASE = ctc_zero_infinity
# adapter
_SCREAMING_SNAKE_CASE = add_adapter
_SCREAMING_SNAKE_CASE = adapter_kernel_size
_SCREAMING_SNAKE_CASE = adapter_stride
_SCREAMING_SNAKE_CASE = num_adapter_layers
_SCREAMING_SNAKE_CASE = output_hidden_size or hidden_size
# SequenceClassification-specific parameter. Feel free to ignore for other classes.
_SCREAMING_SNAKE_CASE = classifier_proj_size
# XVector-specific parameters. Feel free to ignore for other classes.
_SCREAMING_SNAKE_CASE = list(_A )
_SCREAMING_SNAKE_CASE = list(_A )
_SCREAMING_SNAKE_CASE = list(_A )
_SCREAMING_SNAKE_CASE = xvector_output_dim
@property
def UpperCamelCase ( self ) -> Dict:
return functools.reduce(operator.mul , self.conv_stride , 1 )
| 591
|
import argparse
import gdown
import numpy as np
import torch
from huggingface_hub import hf_hub_download
from transformers import (
CLIPTokenizer,
CLIPTokenizerFast,
VideoMAEImageProcessor,
XCLIPConfig,
XCLIPModel,
XCLIPProcessor,
XCLIPTextConfig,
XCLIPVisionConfig,
)
def a__ ( snake_case , snake_case ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Tuple = XCLIPTextConfig()
# derive patch size from model name
__SCREAMING_SNAKE_CASE : Tuple = model_name.find('''patch''' )
__SCREAMING_SNAKE_CASE : Union[str, Any] = int(model_name[start_idx + len('''patch''' ) : start_idx + len('''patch''' ) + 2] )
__SCREAMING_SNAKE_CASE : Tuple = XCLIPVisionConfig(patch_size=snake_case , num_frames=snake_case )
if "large" in model_name:
__SCREAMING_SNAKE_CASE : Optional[Any] = 768
__SCREAMING_SNAKE_CASE : Optional[int] = 3_072
__SCREAMING_SNAKE_CASE : Optional[Any] = 12
__SCREAMING_SNAKE_CASE : Optional[Any] = 1_024
__SCREAMING_SNAKE_CASE : int = 4_096
__SCREAMING_SNAKE_CASE : Tuple = 16
__SCREAMING_SNAKE_CASE : Optional[int] = 24
__SCREAMING_SNAKE_CASE : Optional[int] = 768
__SCREAMING_SNAKE_CASE : Optional[int] = 3_072
if model_name == "xclip-large-patch14-16-frames":
__SCREAMING_SNAKE_CASE : Any = 336
__SCREAMING_SNAKE_CASE : Any = XCLIPConfig.from_text_vision_configs(snake_case , snake_case )
if "large" in model_name:
__SCREAMING_SNAKE_CASE : Any = 768
return config
def a__ ( snake_case ):
"""simple docstring"""
# text encoder
if name == "token_embedding.weight":
__SCREAMING_SNAKE_CASE : List[str] = name.replace('''token_embedding.weight''' , '''text_model.embeddings.token_embedding.weight''' )
if name == "positional_embedding":
__SCREAMING_SNAKE_CASE : List[str] = name.replace('''positional_embedding''' , '''text_model.embeddings.position_embedding.weight''' )
if "ln_1" in name:
__SCREAMING_SNAKE_CASE : Union[str, Any] = name.replace('''ln_1''' , '''layer_norm1''' )
if "ln_2" in name:
__SCREAMING_SNAKE_CASE : str = name.replace('''ln_2''' , '''layer_norm2''' )
if "c_fc" in name:
__SCREAMING_SNAKE_CASE : List[str] = name.replace('''c_fc''' , '''fc1''' )
if "c_proj" in name:
__SCREAMING_SNAKE_CASE : Dict = name.replace('''c_proj''' , '''fc2''' )
if name.startswith('''transformer.resblocks''' ):
__SCREAMING_SNAKE_CASE : Any = name.replace('''transformer.resblocks''' , '''text_model.encoder.layers''' )
if "attn.out_proj" in name and "message" not in name:
__SCREAMING_SNAKE_CASE : Dict = name.replace('''attn.out_proj''' , '''self_attn.out_proj''' )
if "ln_final" in name:
__SCREAMING_SNAKE_CASE : List[str] = name.replace('''ln_final''' , '''text_model.final_layer_norm''' )
# visual encoder
if name == "visual.class_embedding":
__SCREAMING_SNAKE_CASE : Optional[Any] = name.replace('''visual.class_embedding''' , '''vision_model.embeddings.class_embedding''' )
if name == "visual.positional_embedding":
__SCREAMING_SNAKE_CASE : Tuple = name.replace('''visual.positional_embedding''' , '''vision_model.embeddings.position_embedding.weight''' )
if name.startswith('''visual.transformer.resblocks''' ):
__SCREAMING_SNAKE_CASE : List[Any] = name.replace('''visual.transformer.resblocks''' , '''vision_model.encoder.layers''' )
if "visual.conv1" in name:
__SCREAMING_SNAKE_CASE : Any = name.replace('''visual.conv1''' , '''vision_model.embeddings.patch_embedding''' )
if "visual.ln_pre" in name:
__SCREAMING_SNAKE_CASE : List[str] = name.replace('''visual.ln_pre''' , '''vision_model.pre_layernorm''' )
if "visual.ln_post" in name:
__SCREAMING_SNAKE_CASE : Dict = name.replace('''visual.ln_post''' , '''vision_model.post_layernorm''' )
if "visual.proj" in name:
__SCREAMING_SNAKE_CASE : Union[str, Any] = name.replace('''visual.proj''' , '''visual_projection.weight''' )
if "text_projection" in name:
__SCREAMING_SNAKE_CASE : Union[str, Any] = name.replace('''text_projection''' , '''text_projection.weight''' )
# things on top
if "prompts_visual_proj" in name:
__SCREAMING_SNAKE_CASE : str = name.replace('''prompts_visual_proj''' , '''prompts_visual_projection''' )
if "prompts_visual_ln" in name:
__SCREAMING_SNAKE_CASE : Optional[int] = name.replace('''prompts_visual_ln''' , '''prompts_visual_layernorm''' )
# mit
if name == "mit.positional_embedding":
__SCREAMING_SNAKE_CASE : Any = name.replace('''positional''' , '''position''' )
if name.startswith('''mit.resblocks''' ):
__SCREAMING_SNAKE_CASE : Union[str, Any] = name.replace('''mit.resblocks''' , '''mit.encoder.layers''' )
# prompts generator
if name.startswith('''prompts_generator.norm''' ):
__SCREAMING_SNAKE_CASE : Tuple = name.replace('''prompts_generator.norm''' , '''prompts_generator.layernorm''' )
return name
def a__ ( snake_case , snake_case ):
"""simple docstring"""
for key in orig_state_dict.copy().keys():
__SCREAMING_SNAKE_CASE : Tuple = orig_state_dict.pop(snake_case )
if "attn.in_proj" in key:
__SCREAMING_SNAKE_CASE : Optional[Any] = key.split('''.''' )
if key.startswith('''visual''' ):
__SCREAMING_SNAKE_CASE : List[Any] = key_split[3]
__SCREAMING_SNAKE_CASE : Any = config.vision_config.hidden_size
if "message_attn" in key:
if "weight" in key:
__SCREAMING_SNAKE_CASE : Union[str, Any] = val[
:dim, :
]
__SCREAMING_SNAKE_CASE : str = val[
dim : dim * 2, :
]
__SCREAMING_SNAKE_CASE : Tuple = val[
-dim:, :
]
else:
__SCREAMING_SNAKE_CASE : Optional[Any] = val[
:dim
]
__SCREAMING_SNAKE_CASE : Tuple = val[
dim : dim * 2
]
__SCREAMING_SNAKE_CASE : Tuple = val[
-dim:
]
else:
if "weight" in key:
__SCREAMING_SNAKE_CASE : Tuple = val[
:dim, :
]
__SCREAMING_SNAKE_CASE : str = val[
dim : dim * 2, :
]
__SCREAMING_SNAKE_CASE : str = val[
-dim:, :
]
else:
__SCREAMING_SNAKE_CASE : Dict = val[:dim]
__SCREAMING_SNAKE_CASE : str = val[
dim : dim * 2
]
__SCREAMING_SNAKE_CASE : Tuple = val[-dim:]
elif key.startswith('''mit''' ):
__SCREAMING_SNAKE_CASE : List[str] = key_split[2]
__SCREAMING_SNAKE_CASE : Union[str, Any] = config.vision_config.mit_hidden_size
if "weight" in key:
__SCREAMING_SNAKE_CASE : str = val[:dim, :]
__SCREAMING_SNAKE_CASE : Tuple = val[dim : dim * 2, :]
__SCREAMING_SNAKE_CASE : Optional[int] = val[-dim:, :]
else:
__SCREAMING_SNAKE_CASE : Any = val[:dim]
__SCREAMING_SNAKE_CASE : Any = val[dim : dim * 2]
__SCREAMING_SNAKE_CASE : Optional[Any] = val[-dim:]
else:
__SCREAMING_SNAKE_CASE : Optional[Any] = key_split[2]
__SCREAMING_SNAKE_CASE : Any = config.text_config.hidden_size
if "weight" in key:
__SCREAMING_SNAKE_CASE : Tuple = val[:dim, :]
__SCREAMING_SNAKE_CASE : int = val[
dim : dim * 2, :
]
__SCREAMING_SNAKE_CASE : Dict = val[-dim:, :]
else:
__SCREAMING_SNAKE_CASE : Tuple = val[:dim]
__SCREAMING_SNAKE_CASE : str = val[
dim : dim * 2
]
__SCREAMING_SNAKE_CASE : int = val[-dim:]
else:
__SCREAMING_SNAKE_CASE : int = rename_key(snake_case )
if new_key_name in ["visual_projection.weight", "text_projection.weight"]:
__SCREAMING_SNAKE_CASE : int = val.T
__SCREAMING_SNAKE_CASE : Union[str, Any] = val
return orig_state_dict
def a__ ( snake_case ):
"""simple docstring"""
if num_frames == 8:
__SCREAMING_SNAKE_CASE : List[Any] = '''eating_spaghetti_8_frames.npy'''
elif num_frames == 16:
__SCREAMING_SNAKE_CASE : Tuple = '''eating_spaghetti.npy'''
elif num_frames == 32:
__SCREAMING_SNAKE_CASE : Dict = '''eating_spaghetti_32_frames.npy'''
__SCREAMING_SNAKE_CASE : List[str] = hf_hub_download(
repo_id='''hf-internal-testing/spaghetti-video''' , filename=snake_case , repo_type='''dataset''' , )
__SCREAMING_SNAKE_CASE : int = np.load(snake_case )
return list(snake_case )
def a__ ( snake_case , snake_case=None , snake_case=False ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Dict = {
# fully supervised kinetics-400 checkpoints
'''xclip-base-patch32''': '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_8.pth''',
'''xclip-base-patch32-16-frames''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_16.pth'''
),
'''xclip-base-patch16''': '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_8.pth''',
'''xclip-base-patch16-16-frames''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_16.pth'''
),
'''xclip-large-patch14''': '''https://drive.google.com/u/0/uc?id=1NUOImq0o5DlQTST17iIP3vG7DgmHQuCx&export=download&confirm=t&uuid=b26caedc-88e2-473e-830a-9d158b653cdb''',
'''xclip-large-patch14-16-frames''': '''https://drive.google.com/u/0/uc?id=1FOYgnJc097OJ4lGwtRCCydQyVPJEOH7d&export=download&confirm=t&uuid=538fa810-e671-4050-b385-9a623f89804f''',
# fully supervised kinetics-600 checkpoints
'''xclip-base-patch16-kinetics-600''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_8.pth'''
),
'''xclip-base-patch16-kinetics-600-16-frames''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_16.pth'''
),
'''xclip-large-patch14-kinetics-600''': '''https://drive.google.com/u/0/uc?id=1FV8C1INuM91sLAN4ImjzePLIlpMSihwV&export=download&confirm=t&uuid=141d4977-4a65-44ae-864f-4b0c19f838be''',
# few shot
'''xclip-base-patch16-hmdb-2-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_2.pth'''
),
'''xclip-base-patch16-hmdb-4-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_4.pth'''
),
'''xclip-base-patch16-hmdb-8-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_8.pth'''
),
'''xclip-base-patch16-hmdb-16-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_16.pth'''
),
'''xclip-base-patch16-ucf-2-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_2.pth'''
),
'''xclip-base-patch16-ucf-4-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_4.pth'''
),
'''xclip-base-patch16-ucf-8-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_8.pth'''
),
'''xclip-base-patch16-ucf-16-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_16.pth'''
),
# zero shot
'''xclip-base-patch16-zero-shot''': '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/zero.pth''',
}
__SCREAMING_SNAKE_CASE : Optional[Any] = model_to_url[model_name]
__SCREAMING_SNAKE_CASE : Any = 8
if "16-frames" in model_name:
__SCREAMING_SNAKE_CASE : Optional[int] = 16
elif "shot" in model_name:
__SCREAMING_SNAKE_CASE : Optional[Any] = 32
__SCREAMING_SNAKE_CASE : List[str] = get_xclip_config(snake_case , snake_case )
__SCREAMING_SNAKE_CASE : Tuple = XCLIPModel(snake_case )
model.eval()
if "drive" in checkpoint_url:
__SCREAMING_SNAKE_CASE : Union[str, Any] = '''pytorch_model.bin'''
gdown.cached_download(snake_case , snake_case , quiet=snake_case )
__SCREAMING_SNAKE_CASE : Union[str, Any] = torch.load(snake_case , map_location='''cpu''' )['''model''']
else:
__SCREAMING_SNAKE_CASE : str = torch.hub.load_state_dict_from_url(snake_case )['''model''']
__SCREAMING_SNAKE_CASE : List[Any] = convert_state_dict(snake_case , snake_case )
__SCREAMING_SNAKE_CASE : Union[str, Any] = XCLIPModel(snake_case )
__SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Any = model.load_state_dict(snake_case , strict=snake_case )
assert missing_keys == ["text_model.embeddings.position_ids", "vision_model.embeddings.position_ids"]
model.eval()
__SCREAMING_SNAKE_CASE : Any = 336 if model_name == '''xclip-large-patch14-16-frames''' else 224
__SCREAMING_SNAKE_CASE : str = VideoMAEImageProcessor(size=snake_case )
__SCREAMING_SNAKE_CASE : int = CLIPTokenizer.from_pretrained('''openai/clip-vit-base-patch32''' )
__SCREAMING_SNAKE_CASE : Optional[int] = CLIPTokenizerFast.from_pretrained('''openai/clip-vit-base-patch32''' )
__SCREAMING_SNAKE_CASE : List[Any] = XCLIPProcessor(image_processor=snake_case , tokenizer=snake_case )
__SCREAMING_SNAKE_CASE : Dict = prepare_video(snake_case )
__SCREAMING_SNAKE_CASE : List[str] = processor(
text=['''playing sports''', '''eating spaghetti''', '''go shopping'''] , videos=snake_case , return_tensors='''pt''' , padding=snake_case )
print('''Shape of pixel values:''' , inputs.pixel_values.shape )
with torch.no_grad():
__SCREAMING_SNAKE_CASE : Optional[Any] = model(**snake_case )
# Verify outputs
__SCREAMING_SNAKE_CASE : Dict = outputs.logits_per_video
__SCREAMING_SNAKE_CASE : Tuple = logits_per_video.softmax(dim=1 )
print('''Probs:''' , snake_case )
# kinetics-400
if model_name == "xclip-base-patch32":
__SCREAMING_SNAKE_CASE : List[Any] = torch.tensor([[0.0019, 0.9951, 0.0030]] )
elif model_name == "xclip-base-patch32-16-frames":
__SCREAMING_SNAKE_CASE : Optional[int] = torch.tensor([[7.0999E-04, 9.9883E-01, 4.5580E-04]] )
elif model_name == "xclip-base-patch16":
__SCREAMING_SNAKE_CASE : Dict = torch.tensor([[0.0083, 0.9681, 0.0236]] )
elif model_name == "xclip-base-patch16-16-frames":
__SCREAMING_SNAKE_CASE : List[Any] = torch.tensor([[7.6937E-04, 9.9728E-01, 1.9473E-03]] )
elif model_name == "xclip-large-patch14":
__SCREAMING_SNAKE_CASE : Union[str, Any] = torch.tensor([[0.0062, 0.9864, 0.0075]] )
elif model_name == "xclip-large-patch14-16-frames":
__SCREAMING_SNAKE_CASE : Optional[int] = torch.tensor([[3.3877E-04, 9.9937E-01, 2.8888E-04]] )
# kinetics-600
elif model_name == "xclip-base-patch16-kinetics-600":
__SCREAMING_SNAKE_CASE : Union[str, Any] = torch.tensor([[0.0555, 0.8914, 0.0531]] )
elif model_name == "xclip-base-patch16-kinetics-600-16-frames":
__SCREAMING_SNAKE_CASE : Union[str, Any] = torch.tensor([[3.8554E-04, 9.9929E-01, 3.2754E-04]] )
elif model_name == "xclip-large-patch14-kinetics-600":
__SCREAMING_SNAKE_CASE : List[str] = torch.tensor([[0.0036, 0.9920, 0.0045]] )
# few shot
elif model_name == "xclip-base-patch16-hmdb-2-shot":
__SCREAMING_SNAKE_CASE : str = torch.tensor([[7.1890E-06, 9.9994E-01, 5.6559E-05]] )
elif model_name == "xclip-base-patch16-hmdb-4-shot":
__SCREAMING_SNAKE_CASE : int = torch.tensor([[1.0320E-05, 9.9993E-01, 6.2435E-05]] )
elif model_name == "xclip-base-patch16-hmdb-8-shot":
__SCREAMING_SNAKE_CASE : Tuple = torch.tensor([[4.1377E-06, 9.9990E-01, 9.8386E-05]] )
elif model_name == "xclip-base-patch16-hmdb-16-shot":
__SCREAMING_SNAKE_CASE : Dict = torch.tensor([[4.1347E-05, 9.9962E-01, 3.3411E-04]] )
elif model_name == "xclip-base-patch16-ucf-2-shot":
__SCREAMING_SNAKE_CASE : Tuple = torch.tensor([[8.5857E-05, 9.9928E-01, 6.3291E-04]] )
elif model_name == "xclip-base-patch16-ucf-4-shot":
__SCREAMING_SNAKE_CASE : Optional[int] = torch.tensor([[8.5857E-05, 9.9928E-01, 6.3291E-04]] )
elif model_name == "xclip-base-patch16-ucf-8-shot":
__SCREAMING_SNAKE_CASE : Optional[Any] = torch.tensor([[0.0027, 0.9904, 0.0070]] )
elif model_name == "xclip-base-patch16-ucf-16-shot":
__SCREAMING_SNAKE_CASE : Tuple = torch.tensor([[9.8219E-04, 9.9593E-01, 3.0863E-03]] )
# zero shot
elif model_name == "xclip-base-patch16-zero-shot":
__SCREAMING_SNAKE_CASE : List[Any] = torch.tensor([[3.5082E-04, 9.9785E-01, 1.7966E-03]] )
else:
raise ValueError(F'''Model name {model_name} not supported''' )
assert torch.allclose(snake_case , snake_case , atol=1E-3 )
print('''Looks ok!''' )
if pytorch_dump_folder_path is not None:
print(F'''Saving model {model_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(snake_case )
if push_to_hub:
print('''Pushing model, processor and slow tokenizer files to the hub...''' )
model.push_to_hub(snake_case , organization='''nielsr''' )
processor.push_to_hub(snake_case , organization='''nielsr''' )
slow_tokenizer.push_to_hub(snake_case , organization='''nielsr''' )
if __name__ == "__main__":
lowercase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""xclip-base-patch32""",
type=str,
help="""Name of the model.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
lowercase_ = parser.parse_args()
convert_xclip_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 74
| 0
|
__A : Any ={
"Pillow": "Pillow",
"accelerate": "accelerate>=0.11.0",
"compel": "compel==0.1.8",
"black": "black~=23.1",
"datasets": "datasets",
"filelock": "filelock",
"flax": "flax>=0.4.1",
"hf-doc-builder": "hf-doc-builder>=0.3.0",
"huggingface-hub": "huggingface-hub>=0.13.2",
"requests-mock": "requests-mock==1.10.0",
"importlib_metadata": "importlib_metadata",
"invisible-watermark": "invisible-watermark",
"isort": "isort>=5.5.4",
"jax": "jax>=0.2.8,!=0.3.2",
"jaxlib": "jaxlib>=0.1.65",
"Jinja2": "Jinja2",
"k-diffusion": "k-diffusion>=0.0.12",
"torchsde": "torchsde",
"note_seq": "note_seq",
"librosa": "librosa",
"numpy": "numpy",
"omegaconf": "omegaconf",
"parameterized": "parameterized",
"protobuf": "protobuf>=3.20.3,<4",
"pytest": "pytest",
"pytest-timeout": "pytest-timeout",
"pytest-xdist": "pytest-xdist",
"ruff": "ruff>=0.0.241",
"safetensors": "safetensors",
"sentencepiece": "sentencepiece>=0.1.91,!=0.1.92",
"scipy": "scipy",
"onnx": "onnx",
"regex": "regex!=2019.12.17",
"requests": "requests",
"tensorboard": "tensorboard",
"torch": "torch>=1.4",
"torchvision": "torchvision",
"transformers": "transformers>=4.25.1",
"urllib3": "urllib3<=2.0.0",
}
| 708
|
# This script creates a super tiny model that is useful inside tests, when we just want to test that
# the machinery works, without needing to the check the quality of the outcomes.
#
# This version creates a tiny model through reduction of a normal pre-trained model, but keeping the
# full vocab, merges file, and thus also resulting in a larger model due to a large vocab size.
# This gives ~3MB in total for all files.
#
# If you want a 50 times smaller than this see `fsmt-make-super-tiny-model.py`, which is slightly more complicated
#
#
# It will be used then as "stas/tiny-wmt19-en-de"
# Build
from transformers import FSMTTokenizer, FSMTConfig, FSMTForConditionalGeneration
__A ="facebook/wmt19-en-de"
__A =FSMTTokenizer.from_pretrained(mname)
# get the correct vocab sizes, etc. from the master model
__A =FSMTConfig.from_pretrained(mname)
config.update(
dict(
d_model=4,
encoder_layers=1,
decoder_layers=1,
encoder_ffn_dim=4,
decoder_ffn_dim=4,
encoder_attention_heads=1,
decoder_attention_heads=1,
)
)
__A =FSMTForConditionalGeneration(config)
print(f'''num of params {tiny_model.num_parameters()}''')
# Test
__A =tokenizer(["Making tiny model"], return_tensors="pt")
__A =tiny_model(**batch)
print("test output:", len(outputs.logits[0]))
# Save
__A ="tiny-wmt19-en-de"
tiny_model.half() # makes it smaller
tiny_model.save_pretrained(mname_tiny)
tokenizer.save_pretrained(mname_tiny)
print(f'''Generated {mname_tiny}''')
# Upload
# transformers-cli upload tiny-wmt19-en-de
| 241
| 0
|
'''simple docstring'''
import sys
import webbrowser
import requests
from bsa import BeautifulSoup
from fake_useragent import UserAgent
if __name__ == "__main__":
print('Googling.....')
lowerCAmelCase : List[Any] = 'https://www.google.com/search?q=' + ' '.join(sys.argv[1:])
lowerCAmelCase : List[Any] = requests.get(url, headers={'UserAgent': UserAgent().random})
# res.raise_for_status()
with open('project1a.html', 'wb') as out_file: # only for knowing the class
for data in res.iter_content(1_00_00):
out_file.write(data)
lowerCAmelCase : Tuple = BeautifulSoup(res.text, 'html.parser')
lowerCAmelCase : List[Any] = list(soup.select('.eZt8xd'))[:5]
print(len(links))
for link in links:
if link.text == "Maps":
webbrowser.open(link.get('href'))
else:
webbrowser.open(f"""https://google.com{link.get('href')}""")
| 3
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowerCAmelCase : Union[str, Any] = {
'configuration_git': ['GIT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'GitConfig', 'GitVisionConfig'],
'processing_git': ['GitProcessor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase : List[Any] = [
'GIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'GitForCausalLM',
'GitModel',
'GitPreTrainedModel',
'GitVisionModel',
]
if TYPE_CHECKING:
from .configuration_git import GIT_PRETRAINED_CONFIG_ARCHIVE_MAP, GitConfig, GitVisionConfig
from .processing_git import GitProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_git import (
GIT_PRETRAINED_MODEL_ARCHIVE_LIST,
GitForCausalLM,
GitModel,
GitPreTrainedModel,
GitVisionModel,
)
else:
import sys
lowerCAmelCase : Optional[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 3
| 1
|
import argparse
import glob
import logging
import os
import sys
import time
from collections import defaultdict
from pathlib import Path
from typing import Dict, List, Tuple
import numpy as np
import pytorch_lightning as pl
import torch
from callbacks import SeqaSeqLoggingCallback, get_checkpoint_callback, get_early_stopping_callback
from torch import nn
from torch.utils.data import DataLoader
from transformers import MBartTokenizer, TaForConditionalGeneration
from transformers.models.bart.modeling_bart import shift_tokens_right
from utils import (
ROUGE_KEYS,
LegacySeqaSeqDataset,
SeqaSeqDataset,
assert_all_frozen,
calculate_bleu,
calculate_rouge,
check_output_dir,
flatten_list,
freeze_embeds,
freeze_params,
get_git_info,
label_smoothed_nll_loss,
lmap,
pickle_save,
save_git_info,
save_json,
use_task_specific_params,
)
# need the parent dir module
sys.path.insert(2, str(Path(__file__).resolve().parents[1]))
from lightning_base import BaseTransformer, add_generic_args, generic_train # noqa
lowercase_ = logging.getLogger(__name__)
class A__ ( __SCREAMING_SNAKE_CASE ):
lowerCamelCase__ : Dict ="summarization"
lowerCamelCase__ : Dict =["loss"]
lowerCamelCase__ : Union[str, Any] =ROUGE_KEYS
lowerCamelCase__ : Optional[int] ="rouge2"
def __init__( self , lowerCamelCase , **lowerCamelCase ) -> int:
"""simple docstring"""
if hparams.sortish_sampler and hparams.gpus > 1:
__magic_name__ : str = False
elif hparams.max_tokens_per_batch is not None:
if hparams.gpus > 1:
raise NotImplementedError('''Dynamic Batch size does not work for multi-gpu training''' )
if hparams.sortish_sampler:
raise ValueError('''--sortish_sampler and --max_tokens_per_batch may not be used simultaneously''' )
super().__init__(lowerCamelCase , num_labels=lowerCamelCase , mode=self.mode , **lowerCamelCase )
use_task_specific_params(self.model , '''summarization''' )
save_git_info(self.hparams.output_dir )
__magic_name__ : Optional[int] = Path(self.output_dir ) / '''metrics.json'''
__magic_name__ : Tuple = Path(self.output_dir ) / '''hparams.pkl'''
pickle_save(self.hparams , self.hparams_save_path )
__magic_name__ : Optional[int] = 0
__magic_name__ : Any = defaultdict(lowerCamelCase )
__magic_name__ : int = self.config.model_type
__magic_name__ : Any = self.config.tgt_vocab_size if self.model_type == '''fsmt''' else self.config.vocab_size
__magic_name__ : dict = {
"data_dir": self.hparams.data_dir,
"max_source_length": self.hparams.max_source_length,
"prefix": self.model.config.prefix or "",
}
__magic_name__ : Tuple = {
'''train''': self.hparams.n_train,
'''val''': self.hparams.n_val,
'''test''': self.hparams.n_test,
}
__magic_name__ : List[Any] = {k: v if v >= 0 else None for k, v in n_observations_per_split.items()}
__magic_name__ : Dict = {
'''train''': self.hparams.max_target_length,
'''val''': self.hparams.val_max_target_length,
'''test''': self.hparams.test_max_target_length,
}
assert self.target_lens["train"] <= self.target_lens["val"], F'''target_lens: {self.target_lens}'''
assert self.target_lens["train"] <= self.target_lens["test"], F'''target_lens: {self.target_lens}'''
if self.hparams.freeze_embeds:
freeze_embeds(self.model )
if self.hparams.freeze_encoder:
freeze_params(self.model.get_encoder() )
assert_all_frozen(self.model.get_encoder() )
__magic_name__ : Optional[Any] = get_git_info()['''repo_sha''']
__magic_name__ : Optional[Any] = hparams.num_workers
__magic_name__ : str = None # default to config
if self.model.config.decoder_start_token_id is None and isinstance(self.tokenizer , lowerCamelCase ):
__magic_name__ : str = self.tokenizer.lang_code_to_id[hparams.tgt_lang]
__magic_name__ : Tuple = self.decoder_start_token_id
__magic_name__ : Optional[int] = (
SeqaSeqDataset if hasattr(self.tokenizer , '''prepare_seq2seq_batch''' ) else LegacySeqaSeqDataset
)
__magic_name__ : Any = False
__magic_name__ : List[str] = self.model.config.num_beams if self.hparams.eval_beams is None else self.hparams.eval_beams
if self.hparams.eval_max_gen_length is not None:
__magic_name__ : Any = self.hparams.eval_max_gen_length
else:
__magic_name__ : str = self.model.config.max_length
__magic_name__ : List[Any] = self.default_val_metric if self.hparams.val_metric is None else self.hparams.val_metric
def lowercase ( self , lowerCamelCase ) -> Dict[str, List[str]]:
"""simple docstring"""
__magic_name__ : List[str] = {
k: self.tokenizer.batch_decode(v.tolist() ) if '''mask''' not in k else v.shape for k, v in batch.items()
}
save_json(lowerCamelCase , Path(self.output_dir ) / '''text_batch.json''' )
save_json({k: v.tolist() for k, v in batch.items()} , Path(self.output_dir ) / '''tok_batch.json''' )
__magic_name__ : List[Any] = True
return readable_batch
def lowercase ( self , lowerCamelCase , **lowerCamelCase ) -> str:
"""simple docstring"""
return self.model(lowerCamelCase , **lowerCamelCase )
def lowercase ( self , lowerCamelCase ) -> Tuple:
"""simple docstring"""
__magic_name__ : Any = self.tokenizer.batch_decode(
lowerCamelCase , skip_special_tokens=lowerCamelCase , clean_up_tokenization_spaces=lowerCamelCase )
return lmap(str.strip , lowerCamelCase )
def lowercase ( self , lowerCamelCase ) -> Tuple:
"""simple docstring"""
__magic_name__ : List[Any] = self.tokenizer.pad_token_id
__magic_name__ , __magic_name__ : Any = batch['''input_ids'''], batch['''attention_mask''']
__magic_name__ : Union[str, Any] = batch['''labels''']
if isinstance(self.model , lowerCamelCase ):
__magic_name__ : Optional[Any] = self.model._shift_right(lowerCamelCase )
else:
__magic_name__ : str = shift_tokens_right(lowerCamelCase , lowerCamelCase )
if not self.already_saved_batch: # This would be slightly better if it only happened on rank zero
__magic_name__ : int = decoder_input_ids
self.save_readable_batch(lowerCamelCase )
__magic_name__ : Union[str, Any] = self(lowerCamelCase , attention_mask=lowerCamelCase , decoder_input_ids=lowerCamelCase , use_cache=lowerCamelCase )
__magic_name__ : str = outputs['''logits''']
if self.hparams.label_smoothing == 0:
# Same behavior as modeling_bart.py, besides ignoring pad_token_id
__magic_name__ : List[Any] = nn.CrossEntropyLoss(ignore_index=lowerCamelCase )
assert lm_logits.shape[-1] == self.vocab_size
__magic_name__ : Union[str, Any] = ce_loss_fct(lm_logits.view(-1 , lm_logits.shape[-1] ) , tgt_ids.view(-1 ) )
else:
__magic_name__ : Optional[Any] = nn.functional.log_softmax(lowerCamelCase , dim=-1 )
__magic_name__ , __magic_name__ : Dict = label_smoothed_nll_loss(
lowerCamelCase , lowerCamelCase , self.hparams.label_smoothing , ignore_index=lowerCamelCase )
return (loss,)
@property
def lowercase ( self ) -> int:
"""simple docstring"""
return self.tokenizer.pad_token_id
def lowercase ( self , lowerCamelCase , lowerCamelCase ) -> Dict:
"""simple docstring"""
__magic_name__ : Optional[int] = self._step(lowerCamelCase )
__magic_name__ : Optional[int] = dict(zip(self.loss_names , lowerCamelCase ) )
# tokens per batch
__magic_name__ : Any = batch['''input_ids'''].ne(self.pad ).sum() + batch['''labels'''].ne(self.pad ).sum()
__magic_name__ : List[str] = batch['''input_ids'''].shape[0]
__magic_name__ : List[Any] = batch['''input_ids'''].eq(self.pad ).sum()
__magic_name__ : List[str] = batch['''input_ids'''].eq(self.pad ).float().mean()
# TODO(SS): make a wandb summary metric for this
return {"loss": loss_tensors[0], "log": logs}
def lowercase ( self , lowerCamelCase , lowerCamelCase ) -> Dict:
"""simple docstring"""
return self._generative_step(lowerCamelCase )
def lowercase ( self , lowerCamelCase , lowerCamelCase="val" ) -> Dict:
"""simple docstring"""
self.step_count += 1
__magic_name__ : List[str] = {k: torch.stack([x[k] for x in outputs] ).mean() for k in self.loss_names}
__magic_name__ : int = losses['''loss''']
__magic_name__ : Any = {
k: np.array([x[k] for x in outputs] ).mean() for k in self.metric_names + ['''gen_time''', '''gen_len''']
}
__magic_name__ : Optional[int] = (
generative_metrics[self.val_metric] if self.val_metric in generative_metrics else losses[self.val_metric]
)
__magic_name__ : torch.FloatTensor = torch.tensor(lowerCamelCase ).type_as(lowerCamelCase )
generative_metrics.update({k: v.item() for k, v in losses.items()} )
losses.update(lowerCamelCase )
__magic_name__ : str = {F'''{prefix}_avg_{k}''': x for k, x in losses.items()}
__magic_name__ : Tuple = self.step_count
self.metrics[prefix].append(lowerCamelCase ) # callback writes this to self.metrics_save_path
__magic_name__ : Union[str, Any] = flatten_list([x['''preds'''] for x in outputs] )
return {
"log": all_metrics,
"preds": preds,
F'''{prefix}_loss''': loss,
F'''{prefix}_{self.val_metric}''': metric_tensor,
}
def lowercase ( self , lowerCamelCase , lowerCamelCase ) -> Dict:
"""simple docstring"""
return calculate_rouge(lowerCamelCase , lowerCamelCase )
def lowercase ( self , lowerCamelCase ) -> dict:
"""simple docstring"""
__magic_name__ : int = time.time()
# parser.add_argument('--eval_max_gen_length', type=int, default=None, help='never generate more than n tokens')
__magic_name__ : Any = self.model.generate(
batch['''input_ids'''] , attention_mask=batch['''attention_mask'''] , use_cache=lowerCamelCase , decoder_start_token_id=self.decoder_start_token_id , num_beams=self.eval_beams , max_length=self.eval_max_length , )
__magic_name__ : Tuple = (time.time() - ta) / batch['''input_ids'''].shape[0]
__magic_name__ : List[str] = self.ids_to_clean_text(lowerCamelCase )
__magic_name__ : List[str] = self.ids_to_clean_text(batch['''labels'''] )
__magic_name__ : str = self._step(lowerCamelCase )
__magic_name__ : Optional[int] = dict(zip(self.loss_names , lowerCamelCase ) )
__magic_name__ : Dict = self.calc_generative_metrics(lowerCamelCase , lowerCamelCase )
__magic_name__ : Optional[int] = np.mean(lmap(lowerCamelCase , lowerCamelCase ) )
base_metrics.update(gen_time=lowerCamelCase , gen_len=lowerCamelCase , preds=lowerCamelCase , target=lowerCamelCase , **lowerCamelCase )
return base_metrics
def lowercase ( self , lowerCamelCase , lowerCamelCase ) -> List[Any]:
"""simple docstring"""
return self._generative_step(lowerCamelCase )
def lowercase ( self , lowerCamelCase ) -> Dict:
"""simple docstring"""
return self.validation_epoch_end(lowerCamelCase , prefix='''test''' )
def lowercase ( self , lowerCamelCase ) -> SeqaSeqDataset:
"""simple docstring"""
__magic_name__ : Dict = self.n_obs[type_path]
__magic_name__ : Dict = self.target_lens[type_path]
__magic_name__ : str = self.dataset_class(
self.tokenizer , type_path=lowerCamelCase , n_obs=lowerCamelCase , max_target_length=lowerCamelCase , **self.dataset_kwargs , )
return dataset
def lowercase ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase = False ) -> DataLoader:
"""simple docstring"""
__magic_name__ : List[str] = self.get_dataset(lowerCamelCase )
if self.hparams.sortish_sampler and type_path != "test" and type_path != "val":
__magic_name__ : str = dataset.make_sortish_sampler(lowerCamelCase , distributed=self.hparams.gpus > 1 )
return DataLoader(
lowerCamelCase , batch_size=lowerCamelCase , collate_fn=dataset.collate_fn , shuffle=lowerCamelCase , num_workers=self.num_workers , sampler=lowerCamelCase , )
elif self.hparams.max_tokens_per_batch is not None and type_path != "test" and type_path != "val":
__magic_name__ : int = dataset.make_dynamic_sampler(
self.hparams.max_tokens_per_batch , distributed=self.hparams.gpus > 1 )
return DataLoader(
lowerCamelCase , batch_sampler=lowerCamelCase , collate_fn=dataset.collate_fn , num_workers=self.num_workers , )
else:
return DataLoader(
lowerCamelCase , batch_size=lowerCamelCase , collate_fn=dataset.collate_fn , shuffle=lowerCamelCase , num_workers=self.num_workers , sampler=lowerCamelCase , )
def lowercase ( self ) -> DataLoader:
"""simple docstring"""
__magic_name__ : Optional[int] = self.get_dataloader('''train''' , batch_size=self.hparams.train_batch_size , shuffle=lowerCamelCase )
return dataloader
def lowercase ( self ) -> DataLoader:
"""simple docstring"""
return self.get_dataloader('''val''' , batch_size=self.hparams.eval_batch_size )
def lowercase ( self ) -> DataLoader:
"""simple docstring"""
return self.get_dataloader('''test''' , batch_size=self.hparams.eval_batch_size )
@staticmethod
def lowercase ( lowerCamelCase , lowerCamelCase ) -> Tuple:
"""simple docstring"""
BaseTransformer.add_model_specific_args(lowerCamelCase , lowerCamelCase )
add_generic_args(lowerCamelCase , lowerCamelCase )
parser.add_argument(
'''--max_source_length''' , default=1024 , type=lowerCamelCase , help=(
'''The maximum total input sequence length after tokenization. Sequences longer '''
'''than this will be truncated, sequences shorter will be padded.'''
) , )
parser.add_argument(
'''--max_target_length''' , default=56 , type=lowerCamelCase , help=(
'''The maximum total input sequence length after tokenization. Sequences longer '''
'''than this will be truncated, sequences shorter will be padded.'''
) , )
parser.add_argument(
'''--val_max_target_length''' , default=142 , type=lowerCamelCase , help=(
'''The maximum total input sequence length after tokenization. Sequences longer '''
'''than this will be truncated, sequences shorter will be padded.'''
) , )
parser.add_argument(
'''--test_max_target_length''' , default=142 , type=lowerCamelCase , help=(
'''The maximum total input sequence length after tokenization. Sequences longer '''
'''than this will be truncated, sequences shorter will be padded.'''
) , )
parser.add_argument('''--freeze_encoder''' , action='''store_true''' )
parser.add_argument('''--freeze_embeds''' , action='''store_true''' )
parser.add_argument('''--sortish_sampler''' , action='''store_true''' , default=lowerCamelCase )
parser.add_argument('''--overwrite_output_dir''' , action='''store_true''' , default=lowerCamelCase )
parser.add_argument('''--max_tokens_per_batch''' , type=lowerCamelCase , default=lowerCamelCase )
parser.add_argument('''--logger_name''' , type=lowerCamelCase , choices=['''default''', '''wandb''', '''wandb_shared'''] , default='''default''' )
parser.add_argument('''--n_train''' , type=lowerCamelCase , default=-1 , required=lowerCamelCase , help='''# examples. -1 means use all.''' )
parser.add_argument('''--n_val''' , type=lowerCamelCase , default=500 , required=lowerCamelCase , help='''# examples. -1 means use all.''' )
parser.add_argument('''--n_test''' , type=lowerCamelCase , default=-1 , required=lowerCamelCase , help='''# examples. -1 means use all.''' )
parser.add_argument(
'''--task''' , type=lowerCamelCase , default='''summarization''' , required=lowerCamelCase , help='''# examples. -1 means use all.''' )
parser.add_argument('''--label_smoothing''' , type=lowerCamelCase , default=0.0 , required=lowerCamelCase )
parser.add_argument('''--src_lang''' , type=lowerCamelCase , default='''''' , required=lowerCamelCase )
parser.add_argument('''--tgt_lang''' , type=lowerCamelCase , default='''''' , required=lowerCamelCase )
parser.add_argument('''--eval_beams''' , type=lowerCamelCase , default=lowerCamelCase , required=lowerCamelCase )
parser.add_argument(
'''--val_metric''' , type=lowerCamelCase , default=lowerCamelCase , required=lowerCamelCase , choices=['''bleu''', '''rouge2''', '''loss''', None] )
parser.add_argument('''--eval_max_gen_length''' , type=lowerCamelCase , default=lowerCamelCase , help='''never generate more than n tokens''' )
parser.add_argument('''--save_top_k''' , type=lowerCamelCase , default=1 , required=lowerCamelCase , help='''How many checkpoints to save''' )
parser.add_argument(
'''--early_stopping_patience''' , type=lowerCamelCase , default=-1 , required=lowerCamelCase , help=(
'''-1 means never early stop. early_stopping_patience is measured in validation checks, not epochs. So'''
''' val_check_interval will effect it.'''
) , )
return parser
class A__ ( __SCREAMING_SNAKE_CASE ):
lowerCamelCase__ : Tuple ="translation"
lowerCamelCase__ : str =["loss"]
lowerCamelCase__ : Optional[int] =["bleu"]
lowerCamelCase__ : str ="bleu"
def __init__( self , lowerCamelCase , **lowerCamelCase ) -> Optional[int]:
"""simple docstring"""
super().__init__(lowerCamelCase , **lowerCamelCase )
__magic_name__ : int = hparams.src_lang
__magic_name__ : Union[str, Any] = hparams.tgt_lang
def lowercase ( self , lowerCamelCase , lowerCamelCase ) -> dict:
"""simple docstring"""
return calculate_bleu(lowerCamelCase , lowerCamelCase )
def lowerCAmelCase ( UpperCAmelCase, UpperCAmelCase=None ) ->SummarizationModule:
"""simple docstring"""
Path(args.output_dir ).mkdir(exist_ok=UpperCAmelCase )
check_output_dir(UpperCAmelCase, expected_items=3 )
if model is None:
if "summarization" in args.task:
__magic_name__ : SummarizationModule = SummarizationModule(UpperCAmelCase )
else:
__magic_name__ : SummarizationModule = TranslationModule(UpperCAmelCase )
__magic_name__ : Optional[Any] = Path(args.data_dir ).name
if (
args.logger_name == "default"
or args.fast_dev_run
or str(args.output_dir ).startswith('''/tmp''' )
or str(args.output_dir ).startswith('''/var''' )
):
__magic_name__ : Dict = True # don't pollute wandb logs unnecessarily
elif args.logger_name == "wandb":
from pytorch_lightning.loggers import WandbLogger
__magic_name__ : List[str] = os.environ.get('''WANDB_PROJECT''', UpperCAmelCase )
__magic_name__ : Optional[Any] = WandbLogger(name=model.output_dir.name, project=UpperCAmelCase )
elif args.logger_name == "wandb_shared":
from pytorch_lightning.loggers import WandbLogger
__magic_name__ : Any = WandbLogger(name=model.output_dir.name, project=F'''hf_{dataset}''' )
if args.early_stopping_patience >= 0:
__magic_name__ : Tuple = get_early_stopping_callback(model.val_metric, args.early_stopping_patience )
else:
__magic_name__ : Tuple = False
__magic_name__ : int = args.val_metric == '''loss'''
__magic_name__ : pl.Trainer = generic_train(
UpperCAmelCase, UpperCAmelCase, logging_callback=SeqaSeqLoggingCallback(), checkpoint_callback=get_checkpoint_callback(
args.output_dir, model.val_metric, args.save_top_k, UpperCAmelCase ), early_stopping_callback=UpperCAmelCase, logger=UpperCAmelCase, )
pickle_save(model.hparams, model.output_dir / '''hparams.pkl''' )
if not args.do_predict:
return model
__magic_name__ : Union[str, Any] = ''''''
__magic_name__ : List[str] = sorted(glob.glob(os.path.join(args.output_dir, '''*.ckpt''' ), recursive=UpperCAmelCase ) )
if checkpoints:
__magic_name__ : str = checkpoints[-1]
__magic_name__ : Tuple = checkpoints[-1]
trainer.logger.log_hyperparams(model.hparams )
# test() without a model tests using the best checkpoint automatically
trainer.test()
return model
if __name__ == "__main__":
lowercase_ = argparse.ArgumentParser()
lowercase_ = pl.Trainer.add_argparse_args(parser)
lowercase_ = SummarizationModule.add_model_specific_args(parser, os.getcwd())
lowercase_ = parser.parse_args()
main(args)
| 336
|
import math
import random
def lowerCAmelCase ( UpperCAmelCase, UpperCAmelCase = False ) ->float:
"""simple docstring"""
if deriv:
return value * (1 - value)
return 1 / (1 + math.exp(-value ))
# Initial Value
lowercase_ = 0.0_2
def lowerCAmelCase ( UpperCAmelCase, UpperCAmelCase ) ->float:
"""simple docstring"""
__magic_name__ : Optional[int] = float(2 * (random.randint(1, 100 )) - 1 )
for _ in range(UpperCAmelCase ):
# Forward propagation
__magic_name__ : Optional[Any] = sigmoid_function(INITIAL_VALUE * weight )
# How much did we miss?
__magic_name__ : Optional[int] = (expected / 100) - layer_a
# Error delta
__magic_name__ : Tuple = layer_1_error * sigmoid_function(UpperCAmelCase, UpperCAmelCase )
# Update weight
weight += INITIAL_VALUE * layer_1_delta
return layer_a * 100
if __name__ == "__main__":
import doctest
doctest.testmod()
lowercase_ = int(input('''Expected value: '''))
lowercase_ = int(input('''Number of propagations: '''))
print(forward_propagation(expected, number_propagations))
| 336
| 1
|
import math
from datetime import datetime, timedelta
def lowerCAmelCase_ ( _SCREAMING_SNAKE_CASE : int ):
UpperCamelCase_ : Tuple = year % 19
UpperCamelCase_ : Optional[int] = year % 4
UpperCamelCase_ : int = year % 7
UpperCamelCase_ : str = math.floor(year / 100 )
UpperCamelCase_ : Optional[Any] = math.floor((13 + 8 * leap_day_inhibits) / 25 )
UpperCamelCase_ : Tuple = leap_day_inhibits / 4
UpperCamelCase_ : Optional[Any] = (
15 - lunar_orbit_correction + leap_day_inhibits - leap_day_reinstall_number
) % 30
UpperCamelCase_ : Any = (4 + leap_day_inhibits - leap_day_reinstall_number) % 7
# days to be added to March 21
UpperCamelCase_ : List[Any] = (19 * metonic_cycle + secular_moon_shift) % 30
# PHM -> Paschal Full Moon
UpperCamelCase_ : str = (
2 * julian_leap_year
+ 4 * non_leap_year
+ 6 * days_to_add
+ century_starting_point
) % 7
if days_to_add == 29 and days_from_phm_to_sunday == 6:
return datetime(SCREAMING_SNAKE_CASE__ , 4 , 19 )
elif days_to_add == 28 and days_from_phm_to_sunday == 6:
return datetime(SCREAMING_SNAKE_CASE__ , 4 , 18 )
else:
return datetime(SCREAMING_SNAKE_CASE__ , 3 , 22 ) + timedelta(
days=int(days_to_add + days_from_phm_to_sunday ) )
if __name__ == "__main__":
for year in (1994, 2000, 2010, 2021, 2023):
SCREAMING_SNAKE_CASE : int = "will be" if year > datetime.now().year else "was"
print(F'''Easter in {year} {tense} {gauss_easter(year)}''')
| 635
|
import warnings
from ...utils import logging
from .image_processing_mobilevit import MobileViTImageProcessor
lowerCamelCase = logging.get_logger(__name__)
class A ( UpperCamelCase_ ):
def __init__( self : Optional[Any] , *lowercase_ : str , **lowercase_ : List[Any] ) -> None:
"""simple docstring"""
warnings.warn(
'The class MobileViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'
' Please use MobileViTImageProcessor instead.' , lowercase_ , )
super().__init__(*lowercase_ , **lowercase_ )
| 464
| 0
|
import gc
import unittest
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DDPMScheduler,
PriorTransformer,
StableUnCLIPPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.testing_utils import enable_full_determinism, load_numpy, require_torch_gpu, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
class __lowercase ( lowercase_ , lowercase_ , lowercase_ , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE = StableUnCLIPPipeline
SCREAMING_SNAKE_CASE = TEXT_TO_IMAGE_PARAMS
SCREAMING_SNAKE_CASE = TEXT_TO_IMAGE_BATCH_PARAMS
SCREAMING_SNAKE_CASE = TEXT_TO_IMAGE_IMAGE_PARAMS
SCREAMING_SNAKE_CASE = TEXT_TO_IMAGE_IMAGE_PARAMS
# TODO(will) Expected attn_bias.stride(1) == 0 to be true, but got false
SCREAMING_SNAKE_CASE = False
def lowerCAmelCase_ ( self : Any ):
"""simple docstring"""
__A = 32
__A = embedder_hidden_size
# prior components
torch.manual_seed(0 )
__A = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
torch.manual_seed(0 )
__A = CLIPTextModelWithProjection(
CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=UpperCamelCase_ , projection_dim=UpperCamelCase_ , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , ) )
torch.manual_seed(0 )
__A = PriorTransformer(
num_attention_heads=2 , attention_head_dim=12 , embedding_dim=UpperCamelCase_ , num_layers=1 , )
torch.manual_seed(0 )
__A = DDPMScheduler(
variance_type="""fixed_small_log""" , prediction_type="""sample""" , num_train_timesteps=1_000 , clip_sample=UpperCamelCase_ , clip_sample_range=5.0 , beta_schedule="""squaredcos_cap_v2""" , )
# regular denoising components
torch.manual_seed(0 )
__A = StableUnCLIPImageNormalizer(embedding_dim=UpperCamelCase_ )
__A = DDPMScheduler(beta_schedule="""squaredcos_cap_v2""" )
torch.manual_seed(0 )
__A = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
torch.manual_seed(0 )
__A = CLIPTextModel(
CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=UpperCamelCase_ , projection_dim=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , ) )
torch.manual_seed(0 )
__A = UNetaDConditionModel(
sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""CrossAttnDownBlock2D""", """DownBlock2D""") , up_block_types=("""UpBlock2D""", """CrossAttnUpBlock2D""") , block_out_channels=(32, 64) , attention_head_dim=(2, 4) , class_embed_type="""projection""" , projection_class_embeddings_input_dim=embedder_projection_dim * 2 , cross_attention_dim=UpperCamelCase_ , layers_per_block=1 , upcast_attention=UpperCamelCase_ , use_linear_projection=UpperCamelCase_ , )
torch.manual_seed(0 )
__A = DDIMScheduler(
beta_schedule="""scaled_linear""" , beta_start=0.00085 , beta_end=0.012 , prediction_type="""v_prediction""" , set_alpha_to_one=UpperCamelCase_ , steps_offset=1 , )
torch.manual_seed(0 )
__A = AutoencoderKL()
__A = {
# prior components
"""prior_tokenizer""": prior_tokenizer,
"""prior_text_encoder""": prior_text_encoder,
"""prior""": prior,
"""prior_scheduler""": prior_scheduler,
# image noising components
"""image_normalizer""": image_normalizer,
"""image_noising_scheduler""": image_noising_scheduler,
# regular denoising components
"""tokenizer""": tokenizer,
"""text_encoder""": text_encoder,
"""unet""": unet,
"""scheduler""": scheduler,
"""vae""": vae,
}
return components
def lowerCAmelCase_ ( self : Any , UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : Union[str, Any]=0 ):
"""simple docstring"""
if str(UpperCamelCase_ ).startswith("""mps""" ):
__A = torch.manual_seed(UpperCamelCase_ )
else:
__A = torch.Generator(device=UpperCamelCase_ ).manual_seed(UpperCamelCase_ )
__A = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""generator""": generator,
"""num_inference_steps""": 2,
"""prior_num_inference_steps""": 2,
"""output_type""": """numpy""",
}
return inputs
def lowerCAmelCase_ ( self : List[str] ):
"""simple docstring"""
__A = torch_device == """cpu"""
self._test_attention_slicing_forward_pass(test_max_difference=UpperCamelCase_ )
def lowerCAmelCase_ ( self : int ):
"""simple docstring"""
__A = torch_device in ["""cpu""", """mps"""]
self._test_inference_batch_single_identical(test_max_difference=UpperCamelCase_ )
@slow
@require_torch_gpu
class __lowercase ( unittest.TestCase ):
'''simple docstring'''
def lowerCAmelCase_ ( self : Union[str, Any] ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCAmelCase_ ( self : Any ):
"""simple docstring"""
__A = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_anime_turtle_fp16.npy""" )
__A = StableUnCLIPPipeline.from_pretrained("""fusing/stable-unclip-2-1-l""" , torch_dtype=torch.floataa )
pipe.to(UpperCamelCase_ )
pipe.set_progress_bar_config(disable=UpperCamelCase_ )
# stable unclip will oom when integration tests are run on a V100,
# so turn on memory savings
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
__A = torch.Generator(device="""cpu""" ).manual_seed(0 )
__A = pipe("""anime turle""" , generator=UpperCamelCase_ , output_type="""np""" )
__A = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(UpperCamelCase_ , UpperCamelCase_ )
def lowerCAmelCase_ ( self : Optional[int] ):
"""simple docstring"""
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
__A = StableUnCLIPPipeline.from_pretrained("""fusing/stable-unclip-2-1-l""" , torch_dtype=torch.floataa )
__A = pipe.to(UpperCamelCase_ )
pipe.set_progress_bar_config(disable=UpperCamelCase_ )
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
__A = pipe(
"""anime turtle""" , prior_num_inference_steps=2 , num_inference_steps=2 , output_type="""np""" , )
__A = torch.cuda.max_memory_allocated()
# make sure that less than 7 GB is allocated
assert mem_bytes < 7 * 10**9
| 199
|
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import (
ImageTextPipelineOutput,
UniDiffuserPipeline,
)
else:
from .modeling_text_decoder import UniDiffuserTextDecoder
from .modeling_uvit import UniDiffuserModel, UTransformeraDModel
from .pipeline_unidiffuser import ImageTextPipelineOutput, UniDiffuserPipeline
| 199
| 1
|
'''simple docstring'''
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin
@dataclass
class UpperCAmelCase_ ( _SCREAMING_SNAKE_CASE ):
'''simple docstring'''
_lowercase : torch.FloatTensor
_lowercase : torch.FloatTensor
_lowercase : Optional[torch.FloatTensor] = None
class UpperCAmelCase_ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
'''simple docstring'''
_lowercase : Optional[int] = 2
@register_to_config
def __init__( self , _lowercase = 0.02 , _lowercase = 100 , _lowercase = 1.007 , _lowercase = 80 , _lowercase = 0.05 , _lowercase = 50 , ):
"""simple docstring"""
_lowerCAmelCase = sigma_max
# setable values
_lowerCAmelCase = None
_lowerCAmelCase = None
_lowerCAmelCase = None # sigma(t_i)
def _lowercase ( self , _lowercase , _lowercase = None ):
"""simple docstring"""
return sample
def _lowercase ( self , _lowercase , _lowercase = None ):
"""simple docstring"""
_lowerCAmelCase = num_inference_steps
_lowerCAmelCase = np.arange(0 , self.num_inference_steps )[::-1].copy()
_lowerCAmelCase = torch.from_numpy(_lowercase ).to(_lowercase )
_lowerCAmelCase = [
(
self.config.sigma_max**2
* (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1))
)
for i in self.timesteps
]
_lowerCAmelCase = torch.tensor(_lowercase , dtype=torch.floataa , device=_lowercase )
def _lowercase ( self , _lowercase , _lowercase , _lowercase = None ):
"""simple docstring"""
if self.config.s_min <= sigma <= self.config.s_max:
_lowerCAmelCase = min(self.config.s_churn / self.num_inference_steps , 2**0.5 - 1 )
else:
_lowerCAmelCase = 0
# sample eps ~ N(0, S_noise^2 * I)
_lowerCAmelCase = self.config.s_noise * randn_tensor(sample.shape , generator=_lowercase ).to(sample.device )
_lowerCAmelCase = sigma + gamma * sigma
_lowerCAmelCase = sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps)
return sample_hat, sigma_hat
def _lowercase ( self , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase = True , ):
"""simple docstring"""
_lowerCAmelCase = sample_hat + sigma_hat * model_output
_lowerCAmelCase = (sample_hat - pred_original_sample) / sigma_hat
_lowerCAmelCase = sample_hat + (sigma_prev - sigma_hat) * derivative
if not return_dict:
return (sample_prev, derivative)
return KarrasVeOutput(
prev_sample=_lowercase , derivative=_lowercase , pred_original_sample=_lowercase )
def _lowercase ( self , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase = True , ):
"""simple docstring"""
_lowerCAmelCase = sample_prev + sigma_prev * model_output
_lowerCAmelCase = (sample_prev - pred_original_sample) / sigma_prev
_lowerCAmelCase = sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr)
if not return_dict:
return (sample_prev, derivative)
return KarrasVeOutput(
prev_sample=_lowercase , derivative=_lowercase , pred_original_sample=_lowercase )
def _lowercase ( self , _lowercase , _lowercase , _lowercase ):
"""simple docstring"""
raise NotImplementedError()
| 5
|
"""simple docstring"""
import argparse
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
lowercase__ : List[str] = 16
lowercase__ : Optional[Any] = 32
def __lowercase ( _a , _a = 16 ):
snake_case_ : List[Any] = AutoTokenizer.from_pretrained('''bert-base-cased''' )
snake_case_ : Dict = load_dataset('''glue''' , '''mrpc''' )
def tokenize_function(_a ):
# max_length=None => use the model max length (it's actually the default)
snake_case_ : int = tokenizer(examples['''sentence1'''] , examples['''sentence2'''] , truncation=_a , max_length=_a )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
snake_case_ : Optional[Any] = datasets.map(
_a , batched=_a , remove_columns=['''idx''', '''sentence1''', '''sentence2'''] , )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
snake_case_ : int = tokenized_datasets.rename_column('''label''' , '''labels''' )
def collate_fn(_a ):
# On TPU it's best to pad everything to the same length or training will be very slow.
snake_case_ : Dict = 128 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
snake_case_ : Optional[Any] = 16
elif accelerator.mixed_precision != "no":
snake_case_ : Optional[Any] = 8
else:
snake_case_ : Union[str, Any] = None
return tokenizer.pad(
_a , padding='''longest''' , max_length=_a , pad_to_multiple_of=_a , return_tensors='''pt''' , )
# Instantiate dataloaders.
snake_case_ : int = DataLoader(
tokenized_datasets['''train'''] , shuffle=_a , collate_fn=_a , batch_size=_a , drop_last=_a )
snake_case_ : str = DataLoader(
tokenized_datasets['''validation'''] , shuffle=_a , collate_fn=_a , batch_size=_a , drop_last=(accelerator.mixed_precision == '''fp8''') , )
return train_dataloader, eval_dataloader
def __lowercase ( _a , _a ):
# Initialize accelerator
snake_case_ : Tuple = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
snake_case_ : Union[str, Any] = config['''lr''']
snake_case_ : str = int(config['''num_epochs'''] )
snake_case_ : Optional[Any] = int(config['''seed'''] )
snake_case_ : Tuple = int(config['''batch_size'''] )
snake_case_ : str = evaluate.load('''glue''' , '''mrpc''' )
# If the batch size is too big we use gradient accumulation
snake_case_ : Dict = 1
if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
snake_case_ : Union[str, Any] = batch_size // MAX_GPU_BATCH_SIZE
snake_case_ : Any = MAX_GPU_BATCH_SIZE
set_seed(_a )
snake_case_, snake_case_ : Union[str, Any] = get_dataloaders(_a , _a )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
snake_case_ : int = AutoModelForSequenceClassification.from_pretrained('''bert-base-cased''' , return_dict=_a )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
snake_case_ : Union[str, Any] = model.to(accelerator.device )
# Instantiate optimizer
snake_case_ : Any = AdamW(params=model.parameters() , lr=_a )
# Instantiate scheduler
snake_case_ : Any = get_linear_schedule_with_warmup(
optimizer=_a , num_warmup_steps=100 , num_training_steps=(len(_a ) * num_epochs) // gradient_accumulation_steps , )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
snake_case_, snake_case_, snake_case_, snake_case_, snake_case_ : Tuple = accelerator.prepare(
_a , _a , _a , _a , _a )
# Now we train the model
for epoch in range(_a ):
model.train()
for step, batch in enumerate(_a ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
snake_case_ : List[Any] = model(**_a )
snake_case_ : Optional[int] = outputs.loss
snake_case_ : int = loss / gradient_accumulation_steps
accelerator.backward(_a )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
for step, batch in enumerate(_a ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
snake_case_ : int = model(**_a )
snake_case_ : Optional[Any] = outputs.logits.argmax(dim=-1 )
snake_case_, snake_case_ : Union[str, Any] = accelerator.gather_for_metrics((predictions, batch['''labels''']) )
metric.add_batch(
predictions=_a , references=_a , )
snake_case_ : Optional[int] = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(f"epoch {epoch}:" , _a )
def __lowercase ( ):
snake_case_ : Dict = argparse.ArgumentParser(description='''Simple example of training script.''' )
parser.add_argument(
'''--mixed_precision''' , type=_a , default=_a , choices=['''no''', '''fp16''', '''bf16''', '''fp8'''] , help='''Whether to use mixed precision. Choose'''
'''between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.'''
'''and an Nvidia Ampere GPU.''' , )
parser.add_argument('''--cpu''' , action='''store_true''' , help='''If passed, will train on the CPU.''' )
snake_case_ : int = parser.parse_args()
snake_case_ : Union[str, Any] = {'''lr''': 2E-5, '''num_epochs''': 3, '''seed''': 42, '''batch_size''': 16}
training_function(_a , _a )
if __name__ == "__main__":
main()
| 123
| 0
|
'''simple docstring'''
import logging
from pathlib import Path
import numpy as np
import pytorch_lightning as pl
import torch
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.utilities import rank_zero_only
from utils_rag import save_json
def lowercase__ ( _UpperCamelCase) -> Union[str, Any]:
"""simple docstring"""
UpperCamelCase = filter(lambda _UpperCamelCase: p.requires_grad , model.parameters())
UpperCamelCase = sum([np.prod(p.size()) for p in model_parameters])
return params
__magic_name__ : Optional[Any] = logging.getLogger(__name__)
def lowercase__ ( _UpperCamelCase , _UpperCamelCase) -> Any:
"""simple docstring"""
if metric == "rouge2":
UpperCamelCase = '{val_avg_rouge2:.4f}-{step_count}'
elif metric == "bleu":
UpperCamelCase = '{val_avg_bleu:.4f}-{step_count}'
elif metric == "em":
UpperCamelCase = '{val_avg_em:.4f}-{step_count}'
else:
raise NotImplementedError(
F'seq2seq callbacks only support rouge2 and bleu, got {metric}, You can make your own by adding to this'
' function.')
UpperCamelCase = ModelCheckpoint(
dirpath=__snake_case , filename=__snake_case , monitor=F'val_{metric}' , mode='max' , save_top_k=3 , every_n_epochs=1 , )
return checkpoint_callback
def lowercase__ ( _UpperCamelCase , _UpperCamelCase) -> Tuple:
"""simple docstring"""
return EarlyStopping(
monitor=F'val_{metric}' , mode='min' if 'loss' in metric else 'max' , patience=__snake_case , verbose=__snake_case , )
class A__ ( pl.Callback ):
'''simple docstring'''
def _SCREAMING_SNAKE_CASE ( self : int , _SCREAMING_SNAKE_CASE : Optional[int] , _SCREAMING_SNAKE_CASE : Dict ):
"""simple docstring"""
UpperCamelCase = {f'lr_group_{i}': param['lr'] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups )}
pl_module.logger.log_metrics(_A )
@rank_zero_only
def _SCREAMING_SNAKE_CASE ( self : Any , _SCREAMING_SNAKE_CASE : pl.Trainer , _SCREAMING_SNAKE_CASE : pl.LightningModule , _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : Tuple=True ):
"""simple docstring"""
logger.info(f'***** {type_path} results at step {trainer.global_step:05d} *****' )
UpperCamelCase = trainer.callback_metrics
trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ['log', 'progress_bar', 'preds']} )
# Log results
UpperCamelCase = Path(pl_module.hparams.output_dir )
if type_path == "test":
UpperCamelCase = od / 'test_results.txt'
UpperCamelCase = od / 'test_generations.txt'
else:
# this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json
# If people want this it will be easy enough to add back.
UpperCamelCase = od / f'{type_path}_results/{trainer.global_step:05d}.txt'
UpperCamelCase = od / f'{type_path}_generations/{trainer.global_step:05d}.txt'
results_file.parent.mkdir(exist_ok=_A )
generations_file.parent.mkdir(exist_ok=_A )
with open(_A , 'a+' ) as writer:
for key in sorted(_A ):
if key in ["log", "progress_bar", "preds"]:
continue
UpperCamelCase = metrics[key]
if isinstance(_A , torch.Tensor ):
UpperCamelCase = val.item()
UpperCamelCase = f'{key}: {val:.6f}\n'
writer.write(_A )
if not save_generations:
return
if "preds" in metrics:
UpperCamelCase = '\n'.join(metrics['preds'] )
generations_file.open('w+' ).write(_A )
@rank_zero_only
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] , _SCREAMING_SNAKE_CASE : Dict , _SCREAMING_SNAKE_CASE : Tuple ):
"""simple docstring"""
try:
UpperCamelCase = pl_module.model.model.num_parameters()
except AttributeError:
UpperCamelCase = pl_module.model.num_parameters()
UpperCamelCase = count_trainable_parameters(_A )
# mp stands for million parameters
trainer.logger.log_metrics({'n_params': npars, 'mp': npars / 1E6, 'grad_mp': n_trainable_pars / 1E6} )
@rank_zero_only
def _SCREAMING_SNAKE_CASE ( self : str , _SCREAMING_SNAKE_CASE : pl.Trainer , _SCREAMING_SNAKE_CASE : pl.LightningModule ):
"""simple docstring"""
save_json(pl_module.metrics , pl_module.metrics_save_path )
return self._write_logs(_A , _A , 'test' )
@rank_zero_only
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , _SCREAMING_SNAKE_CASE : pl.Trainer , _SCREAMING_SNAKE_CASE : str ):
"""simple docstring"""
save_json(pl_module.metrics , pl_module.metrics_save_path )
# Uncommenting this will save val generations
# return self._write_logs(trainer, pl_module, "valid")
| 705
|
__magic_name__ : List[str] = tuple[float, float, float]
__magic_name__ : Optional[int] = tuple[float, float, float]
def lowercase__ ( _UpperCamelCase , _UpperCamelCase) -> Vectorad:
"""simple docstring"""
UpperCamelCase = end_pointa[0] - end_pointa[0]
UpperCamelCase = end_pointa[1] - end_pointa[1]
UpperCamelCase = end_pointa[2] - end_pointa[2]
return (x, y, z)
def lowercase__ ( _UpperCamelCase , _UpperCamelCase) -> Vectorad:
"""simple docstring"""
UpperCamelCase = ab[1] * ac[2] - ab[2] * ac[1] # *i
UpperCamelCase = (ab[0] * ac[2] - ab[2] * ac[0]) * -1 # *j
UpperCamelCase = ab[0] * ac[1] - ab[1] * ac[0] # *k
return (x, y, z)
def lowercase__ ( _UpperCamelCase , _UpperCamelCase) -> bool:
"""simple docstring"""
return tuple(round(_UpperCamelCase , _UpperCamelCase) for x in vector) == (0, 0, 0)
def lowercase__ ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = 10) -> bool:
"""simple docstring"""
UpperCamelCase = create_vector(_UpperCamelCase , _UpperCamelCase)
UpperCamelCase = create_vector(_UpperCamelCase , _UpperCamelCase)
return is_zero_vector(get_ad_vectors_cross(_UpperCamelCase , _UpperCamelCase) , _UpperCamelCase)
| 410
| 0
|
def _a ( lowerCamelCase ):
return [
{
0: [1, 2],
1: [0, 2],
2: [0, 1, 3, 5],
3: [2, 4],
4: [3],
5: [2, 6, 8],
6: [5, 7],
7: [6, 8],
8: [5, 7],
},
{
0: [6],
1: [9],
2: [4, 5],
3: [4],
4: [2, 3],
5: [2],
6: [0, 7],
7: [6],
8: [],
9: [1],
},
{
0: [4],
1: [6],
2: [],
3: [5, 6, 7],
4: [0, 6],
5: [3, 8, 9],
6: [1, 3, 4, 7],
7: [3, 6, 8, 9],
8: [5, 7],
9: [5, 7],
},
{
0: [1, 3],
1: [0, 2, 4],
2: [1, 3, 4],
3: [0, 2, 4],
4: [1, 2, 3],
},
][index]
def _a ( lowerCamelCase ):
lowerCamelCase : Optional[Any] = 0
lowerCamelCase : List[str] = len(lowercase_ ) # No of vertices in graph
lowerCamelCase : Dict = [0] * n
lowerCamelCase : Optional[Any] = [False] * n
def dfs(lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase ):
lowerCamelCase : Any = True
lowerCamelCase : int = id_
id_ += 1
for to in graph[at]:
if to == parent:
pass
elif not visited[to]:
dfs(lowercase_, lowercase_, lowercase_, id_ )
lowerCamelCase : List[str] = min(low[at], low[to] )
if id_ <= low[to]:
bridges.append((at, to) if at < to else (to, at) )
else:
# This edge is a back edge and cannot be a bridge
lowerCamelCase : Union[str, Any] = min(low[at], low[to] )
lowerCamelCase : Optional[int] = []
for i in range(lowercase_ ):
if not visited[i]:
dfs(lowercase_, -1, lowercase_, id_ )
return bridges
if __name__ == "__main__":
import doctest
doctest.testmod()
| 681
|
# This script creates a super tiny model that is useful inside tests, when we just want to test that
# the machinery works, without needing to the check the quality of the outcomes.
#
# This version creates a tiny model through reduction of a normal pre-trained model, but keeping the
# full vocab, merges file, and thus also resulting in a larger model due to a large vocab size.
# This gives ~3MB in total for all files.
#
# If you want a 50 times smaller than this see `fsmt-make-super-tiny-model.py`, which is slightly more complicated
#
#
# It will be used then as "stas/tiny-wmt19-en-de"
# Build
from transformers import FSMTTokenizer, FSMTConfig, FSMTForConditionalGeneration
__a : Optional[Any] = """facebook/wmt19-en-de"""
__a : Union[str, Any] = FSMTTokenizer.from_pretrained(mname)
# get the correct vocab sizes, etc. from the master model
__a : Optional[int] = FSMTConfig.from_pretrained(mname)
config.update(
dict(
d_model=4,
encoder_layers=1,
decoder_layers=1,
encoder_ffn_dim=4,
decoder_ffn_dim=4,
encoder_attention_heads=1,
decoder_attention_heads=1,
)
)
__a : Union[str, Any] = FSMTForConditionalGeneration(config)
print(F'num of params {tiny_model.num_parameters()}')
# Test
__a : Optional[Any] = tokenizer(["""Making tiny model"""], return_tensors="""pt""")
__a : Optional[Any] = tiny_model(**batch)
print("""test output:""", len(outputs.logits[0]))
# Save
__a : Union[str, Any] = """tiny-wmt19-en-de"""
tiny_model.half() # makes it smaller
tiny_model.save_pretrained(mname_tiny)
tokenizer.save_pretrained(mname_tiny)
print(F'Generated {mname_tiny}')
# Upload
# transformers-cli upload tiny-wmt19-en-de
| 606
| 0
|
'''simple docstring'''
def SCREAMING_SNAKE_CASE ( lowercase_ : str , lowercase_ : str ):
def get_matched_characters(lowercase_ : str , lowercase_ : str ) -> str:
lowercase = []
lowercase = min(len(_stra ) , len(_stra ) ) // 2
for i, l in enumerate(_stra ):
lowercase = int(max(0 , i - limit ) )
lowercase = int(min(i + limit + 1 , len(_stra ) ) )
if l in _stra[left:right]:
matched.append(lowercase_ )
lowercase = F"""{_stra[0:_stra.index(lowercase_ )]} {_stra[_stra.index(lowercase_ ) + 1:]}"""
return "".join(lowercase_ )
# matching characters
lowercase = get_matched_characters(lowercase_ , lowercase_ )
lowercase = get_matched_characters(lowercase_ , lowercase_ )
lowercase = len(lowercase_ )
# transposition
lowercase = (
len([(ca, ca) for ca, ca in zip(lowercase_ , lowercase_ ) if ca != ca] ) // 2
)
if not match_count:
lowercase = 0.0
else:
lowercase = (
1
/ 3
* (
match_count / len(lowercase_ )
+ match_count / len(lowercase_ )
+ (match_count - transpositions) / match_count
)
)
# common prefix up to 4 characters
lowercase = 0
for ca, ca in zip(stra[:4] , stra[:4] ):
if ca == ca:
prefix_len += 1
else:
break
return jaro + 0.1 * prefix_len * (1 - jaro)
if __name__ == "__main__":
import doctest
doctest.testmod()
print(jaro_winkler('''hello''', '''world'''))
| 653
|
'''simple docstring'''
def SCREAMING_SNAKE_CASE ( lowercase_ : int , lowercase_ : int , lowercase_ : list[list[int]] ):
def update_area_of_max_square(lowercase_ : int , lowercase_ : int ) -> int:
# BASE CASE
if row >= rows or col >= cols:
return 0
lowercase = update_area_of_max_square(lowercase_ , col + 1 )
lowercase = update_area_of_max_square(row + 1 , col + 1 )
lowercase = update_area_of_max_square(row + 1 , lowercase_ )
if mat[row][col]:
lowercase = 1 + min([right, diagonal, down] )
lowercase = max(largest_square_area[0] , lowercase_ )
return sub_problem_sol
else:
return 0
lowercase = [0]
update_area_of_max_square(0 , 0 )
return largest_square_area[0]
def SCREAMING_SNAKE_CASE ( lowercase_ : int , lowercase_ : int , lowercase_ : list[list[int]] ):
def update_area_of_max_square_using_dp_array(
lowercase_ : int , lowercase_ : int , lowercase_ : list[list[int]] ) -> int:
if row >= rows or col >= cols:
return 0
if dp_array[row][col] != -1:
return dp_array[row][col]
lowercase = update_area_of_max_square_using_dp_array(lowercase_ , col + 1 , lowercase_ )
lowercase = update_area_of_max_square_using_dp_array(row + 1 , col + 1 , lowercase_ )
lowercase = update_area_of_max_square_using_dp_array(row + 1 , lowercase_ , lowercase_ )
if mat[row][col]:
lowercase = 1 + min([right, diagonal, down] )
lowercase = max(largest_square_area[0] , lowercase_ )
lowercase = sub_problem_sol
return sub_problem_sol
else:
return 0
lowercase = [0]
lowercase = [[-1] * cols for _ in range(lowercase_ )]
update_area_of_max_square_using_dp_array(0 , 0 , lowercase_ )
return largest_square_area[0]
def SCREAMING_SNAKE_CASE ( lowercase_ : int , lowercase_ : int , lowercase_ : list[list[int]] ):
lowercase = [[0] * (cols + 1) for _ in range(rows + 1 )]
lowercase = 0
for row in range(rows - 1 , -1 , -1 ):
for col in range(cols - 1 , -1 , -1 ):
lowercase = dp_array[row][col + 1]
lowercase = dp_array[row + 1][col + 1]
lowercase = dp_array[row + 1][col]
if mat[row][col] == 1:
lowercase = 1 + min(lowercase_ , lowercase_ , lowercase_ )
lowercase = max(dp_array[row][col] , lowercase_ )
else:
lowercase = 0
return largest_square_area
def SCREAMING_SNAKE_CASE ( lowercase_ : int , lowercase_ : int , lowercase_ : list[list[int]] ):
lowercase = [0] * (cols + 1)
lowercase = [0] * (cols + 1)
lowercase = 0
for row in range(rows - 1 , -1 , -1 ):
for col in range(cols - 1 , -1 , -1 ):
lowercase = current_row[col + 1]
lowercase = next_row[col + 1]
lowercase = next_row[col]
if mat[row][col] == 1:
lowercase = 1 + min(lowercase_ , lowercase_ , lowercase_ )
lowercase = max(current_row[col] , lowercase_ )
else:
lowercase = 0
lowercase = current_row
return largest_square_area
if __name__ == "__main__":
import doctest
doctest.testmod()
print(largest_square_area_in_matrix_bottom_up(2, 2, [[1, 1], [1, 1]]))
| 653
| 1
|
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import TransformeraDModel, VQDiffusionPipeline, VQDiffusionScheduler, VQModel
from diffusers.pipelines.vq_diffusion.pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings
from diffusers.utils import load_numpy, slow, torch_device
from diffusers.utils.testing_utils import require_torch_gpu
_A : str = False
class a__ ( unittest.TestCase ):
def __magic_name__ ( self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def __magic_name__ ( self ):
return 12
@property
def __magic_name__ ( self ):
return 12
@property
def __magic_name__ ( self ):
return 32
@property
def __magic_name__ ( self ):
torch.manual_seed(0 )
lowercase : str = VQModel(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=3 , num_vq_embeddings=self.num_embed , vq_embed_dim=3 , )
return model
@property
def __magic_name__ ( self ):
lowercase : List[str] = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
return tokenizer
@property
def __magic_name__ ( self ):
torch.manual_seed(0 )
lowercase : List[str] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , )
return CLIPTextModel(_a )
@property
def __magic_name__ ( self ):
torch.manual_seed(0 )
lowercase : Optional[Any] = 12
lowercase : Optional[int] = 12
lowercase : str = {
"attention_bias": True,
"cross_attention_dim": 32,
"attention_head_dim": height * width,
"num_attention_heads": 1,
"num_vector_embeds": self.num_embed,
"num_embeds_ada_norm": self.num_embeds_ada_norm,
"norm_num_groups": 32,
"sample_size": width,
"activation_fn": "geglu-approximate",
}
lowercase : Tuple = TransformeraDModel(**_a )
return model
def __magic_name__ ( self ):
lowercase : Tuple = "cpu"
lowercase : Any = self.dummy_vqvae
lowercase : Optional[int] = self.dummy_text_encoder
lowercase : int = self.dummy_tokenizer
lowercase : Dict = self.dummy_transformer
lowercase : Tuple = VQDiffusionScheduler(self.num_embed )
lowercase : Optional[int] = LearnedClassifierFreeSamplingEmbeddings(learnable=_a )
lowercase : List[Any] = VQDiffusionPipeline(
vqvae=_a , text_encoder=_a , tokenizer=_a , transformer=_a , scheduler=_a , learned_classifier_free_sampling_embeddings=_a , )
lowercase : Union[str, Any] = pipe.to(_a )
pipe.set_progress_bar_config(disable=_a )
lowercase : Union[str, Any] = "teddy bear playing in the pool"
lowercase : str = torch.Generator(device=_a ).manual_seed(0 )
lowercase : List[str] = pipe([prompt] , generator=_a , num_inference_steps=2 , output_type="np" )
lowercase : str = output.images
lowercase : Optional[Any] = torch.Generator(device=_a ).manual_seed(0 )
lowercase : List[Any] = pipe(
[prompt] , generator=_a , output_type="np" , return_dict=_a , num_inference_steps=2 )[0]
lowercase : Dict = image[0, -3:, -3:, -1]
lowercase : str = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 24, 24, 3)
lowercase : Any = np.array([0.6_5_5_1, 0.6_1_6_8, 0.5_0_0_8, 0.5_6_7_6, 0.5_6_5_9, 0.4_2_9_5, 0.6_0_7_3, 0.5_5_9_9, 0.4_9_9_2] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
def __magic_name__ ( self ):
lowercase : Optional[Any] = "cpu"
lowercase : Optional[Any] = self.dummy_vqvae
lowercase : Optional[int] = self.dummy_text_encoder
lowercase : int = self.dummy_tokenizer
lowercase : int = self.dummy_transformer
lowercase : Dict = VQDiffusionScheduler(self.num_embed )
lowercase : Optional[Any] = LearnedClassifierFreeSamplingEmbeddings(
learnable=_a , hidden_size=self.text_embedder_hidden_size , length=tokenizer.model_max_length )
lowercase : Tuple = VQDiffusionPipeline(
vqvae=_a , text_encoder=_a , tokenizer=_a , transformer=_a , scheduler=_a , learned_classifier_free_sampling_embeddings=_a , )
lowercase : Union[str, Any] = pipe.to(_a )
pipe.set_progress_bar_config(disable=_a )
lowercase : Any = "teddy bear playing in the pool"
lowercase : Dict = torch.Generator(device=_a ).manual_seed(0 )
lowercase : Dict = pipe([prompt] , generator=_a , num_inference_steps=2 , output_type="np" )
lowercase : Tuple = output.images
lowercase : Dict = torch.Generator(device=_a ).manual_seed(0 )
lowercase : Union[str, Any] = pipe(
[prompt] , generator=_a , output_type="np" , return_dict=_a , num_inference_steps=2 )[0]
lowercase : Optional[int] = image[0, -3:, -3:, -1]
lowercase : Any = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 24, 24, 3)
lowercase : List[str] = np.array([0.6_6_9_3, 0.6_0_7_5, 0.4_9_5_9, 0.5_7_0_1, 0.5_5_8_3, 0.4_3_3_3, 0.6_1_7_1, 0.5_6_8_4, 0.4_9_8_8] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2.0
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
@slow
@require_torch_gpu
class a__ ( unittest.TestCase ):
def __magic_name__ ( self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __magic_name__ ( self ):
lowercase : Any = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/vq_diffusion/teddy_bear_pool_classifier_free_sampling.npy" )
lowercase : Union[str, Any] = VQDiffusionPipeline.from_pretrained("microsoft/vq-diffusion-ithq" )
lowercase : Any = pipeline.to(_a )
pipeline.set_progress_bar_config(disable=_a )
# requires GPU generator for gumbel softmax
# don't use GPU generator in tests though
lowercase : int = torch.Generator(device=_a ).manual_seed(0 )
lowercase : Union[str, Any] = pipeline(
"teddy bear playing in the pool" , num_images_per_prompt=1 , generator=_a , output_type="np" , )
lowercase : int = output.images[0]
assert image.shape == (256, 256, 3)
assert np.abs(expected_image - image ).max() < 2.0
| 361
|
"""simple docstring"""
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
if is_torch_available():
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer
@require_torch
@require_sentencepiece
@require_tokenizers
class a__ ( unittest.TestCase ):
@slow
def __magic_name__ ( self ):
lowercase : Tuple = AutoModelForSeqaSeqLM.from_pretrained("google/mt5-small" , return_dict=_a ).to(_a )
lowercase : Union[str, Any] = AutoTokenizer.from_pretrained("google/mt5-small" )
lowercase : Optional[Any] = tokenizer("Hello there" , return_tensors="pt" ).input_ids
lowercase : Union[str, Any] = tokenizer("Hi I am" , return_tensors="pt" ).input_ids
lowercase : int = model(input_ids.to(_a ) , labels=labels.to(_a ) ).loss
lowercase : str = -(labels.shape[-1] * loss.item())
lowercase : List[str] = -8_4.9_1_2_7
self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 1E-4 )
| 361
| 1
|
'''simple docstring'''
import importlib
import json
import os
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
import transformers.models.auto
from transformers.models.auto.configuration_auto import CONFIG_MAPPING, AutoConfig
from transformers.models.bert.configuration_bert import BertConfig
from transformers.models.roberta.configuration_roberta import RobertaConfig
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir
sys.path.append(str(Path(__file__).parent.parent.parent.parent / 'utils'))
from test_module.custom_configuration import CustomConfig # noqa E402
__lowerCAmelCase = get_tests_dir('fixtures/dummy-config.json')
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
def SCREAMING_SNAKE_CASE ( self : Any ) -> Any:
a_ : Any = 0
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Tuple:
self.assertIsNotNone(transformers.models.auto.__spec__ )
self.assertIsNotNone(importlib.util.find_spec('''transformers.models.auto''' ) )
def SCREAMING_SNAKE_CASE ( self : str ) -> List[str]:
a_ : Optional[int] = AutoConfig.from_pretrained('''bert-base-uncased''' )
self.assertIsInstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE ( self : int ) -> Any:
a_ : str = AutoConfig.from_pretrained(__SCREAMING_SNAKE_CASE )
self.assertIsInstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Dict:
a_ : Optional[Any] = AutoConfig.from_pretrained(__SCREAMING_SNAKE_CASE )
self.assertIsInstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Union[str, Any]:
a_ : str = AutoConfig.for_model('''roberta''' )
self.assertIsInstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE ( self : str ) -> Dict:
with tempfile.TemporaryDirectory() as tmp_dir:
# This model name contains bert and roberta, but roberta ends up being picked.
a_ : Union[str, Any] = os.path.join(__SCREAMING_SNAKE_CASE , '''fake-roberta''' )
os.makedirs(__SCREAMING_SNAKE_CASE , exist_ok=__SCREAMING_SNAKE_CASE )
with open(os.path.join(__SCREAMING_SNAKE_CASE , '''config.json''' ) , '''w''' ) as f:
f.write(json.dumps({} ) )
a_ : List[str] = AutoConfig.from_pretrained(__SCREAMING_SNAKE_CASE )
self.assertEqual(type(__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE ( self : int ) -> Any:
try:
AutoConfig.register('''custom''' , __SCREAMING_SNAKE_CASE )
# Wrong model type will raise an error
with self.assertRaises(__SCREAMING_SNAKE_CASE ):
AutoConfig.register('''model''' , __SCREAMING_SNAKE_CASE )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(__SCREAMING_SNAKE_CASE ):
AutoConfig.register('''bert''' , __SCREAMING_SNAKE_CASE )
# Now that the config is registered, it can be used as any other config with the auto-API
a_ : str = CustomConfig()
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(__SCREAMING_SNAKE_CASE )
a_ : Any = AutoConfig.from_pretrained(__SCREAMING_SNAKE_CASE )
self.assertIsInstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> List[str]:
with self.assertRaisesRegex(
__SCREAMING_SNAKE_CASE , '''bert-base is not a local folder and is not a valid model identifier''' ):
a_ : List[Any] = AutoConfig.from_pretrained('''bert-base''' )
def SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Any:
with self.assertRaisesRegex(
__SCREAMING_SNAKE_CASE , r'''aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)''' ):
a_ : Optional[int] = AutoConfig.from_pretrained(__SCREAMING_SNAKE_CASE , revision='''aaaaaa''' )
def SCREAMING_SNAKE_CASE ( self : Tuple ) -> List[str]:
with self.assertRaisesRegex(
__SCREAMING_SNAKE_CASE , '''hf-internal-testing/no-config-test-repo does not appear to have a file named config.json.''' , ):
a_ : Tuple = AutoConfig.from_pretrained('''hf-internal-testing/no-config-test-repo''' )
def SCREAMING_SNAKE_CASE ( self : Tuple ) -> Any:
# If remote code is not set, we will time out when asking whether to load the model.
with self.assertRaises(__SCREAMING_SNAKE_CASE ):
a_ : Union[str, Any] = AutoConfig.from_pretrained('''hf-internal-testing/test_dynamic_model''' )
# If remote code is disabled, we can't load this config.
with self.assertRaises(__SCREAMING_SNAKE_CASE ):
a_ : Tuple = AutoConfig.from_pretrained('''hf-internal-testing/test_dynamic_model''' , trust_remote_code=__SCREAMING_SNAKE_CASE )
a_ : List[Any] = AutoConfig.from_pretrained('''hf-internal-testing/test_dynamic_model''' , trust_remote_code=__SCREAMING_SNAKE_CASE )
self.assertEqual(config.__class__.__name__ , '''NewModelConfig''' )
# Test config can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(__SCREAMING_SNAKE_CASE )
a_ : Dict = AutoConfig.from_pretrained(__SCREAMING_SNAKE_CASE , trust_remote_code=__SCREAMING_SNAKE_CASE )
self.assertEqual(reloaded_config.__class__.__name__ , '''NewModelConfig''' )
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Any:
class SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE_ ):
snake_case__ = "new-model"
try:
AutoConfig.register('''new-model''' , __SCREAMING_SNAKE_CASE )
# If remote code is not set, the default is to use local
a_ : Optional[Any] = AutoConfig.from_pretrained('''hf-internal-testing/test_dynamic_model''' )
self.assertEqual(config.__class__.__name__ , '''NewModelConfigLocal''' )
# If remote code is disabled, we load the local one.
a_ : str = AutoConfig.from_pretrained('''hf-internal-testing/test_dynamic_model''' , trust_remote_code=__SCREAMING_SNAKE_CASE )
self.assertEqual(config.__class__.__name__ , '''NewModelConfigLocal''' )
# If remote is enabled, we load from the Hub
a_ : Optional[Any] = AutoConfig.from_pretrained('''hf-internal-testing/test_dynamic_model''' , trust_remote_code=__SCREAMING_SNAKE_CASE )
self.assertEqual(config.__class__.__name__ , '''NewModelConfig''' )
finally:
if "new-model" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["new-model"]
| 666
|
'''simple docstring'''
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import ClassLabel, Features, Image
from .base import TaskTemplate
@dataclass(frozen=SCREAMING_SNAKE_CASE_ )
class SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE_ ):
snake_case__ = field(default="image-classification" , metadata={"include_in_asdict_even_if_is_default": True} )
snake_case__ = Features({"image": Image()} )
snake_case__ = Features({"labels": ClassLabel} )
snake_case__ = "image"
snake_case__ = "labels"
def SCREAMING_SNAKE_CASE ( self : List[str] , __SCREAMING_SNAKE_CASE : List[Any] ) -> Any:
if self.label_column not in features:
raise ValueError(f'Column {self.label_column} is not present in features.' )
if not isinstance(features[self.label_column] , __SCREAMING_SNAKE_CASE ):
raise ValueError(f'Column {self.label_column} is not a ClassLabel.' )
a_ : Optional[int] = copy.deepcopy(self )
a_ : int = self.label_schema.copy()
a_ : Tuple = features[self.label_column]
a_ : str = label_schema
return task_template
@property
def SCREAMING_SNAKE_CASE ( self : Tuple ) -> Dict[str, str]:
return {
self.image_column: "image",
self.label_column: "labels",
}
| 666
| 1
|
import argparse
import json
import os
from collections import OrderedDict
import numpy as np
import tensorflow as tf
import torch
def lowerCAmelCase_ ( __a ) -> int:
"""simple docstring"""
lowerCamelCase__: Any =os.path.join(args.tf_model_dir , "parameters.json" )
lowerCamelCase__: Any =json.loads(open(__a ).read() )
if not params:
raise ValueError(
F"""It seems that the json file at {parameter_file} is empty. Make sure you have a correct json file.""" )
if not args.output.endswith(".pt" ):
lowerCamelCase__: Union[str, Any] =args.output + ".pt"
lowerCamelCase__: str =OrderedDict()
with tf.device("/CPU:0" ):
lowerCamelCase__: Dict =tf.train.load_checkpoint(args.tf_model_dir )
lowerCamelCase__: Optional[int] =reader.get_variable_to_shape_map()
for key_name in shapes.keys():
lowerCamelCase__: Union[str, Any] =reader.get_tensor(__a ).astype(np.floataa )
if key_name.endswith("/adam_m" ) or key_name.endswith("/adam_v" ):
continue
if key_name.startswith("pasts/" ):
if key_name.startswith("pasts/mlp" ):
lowerCamelCase__: str =int(key_name[9] )
elif key_name.startswith("pasts/out" ):
lowerCamelCase__: int =8
lowerCamelCase__: List[Any] ="model.sqout.%d.weight" % (player * 2) # enter to nn.Sequencial with Tanh, so 2 at a time
lowerCamelCase__: Tuple =vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
lowerCamelCase__: Dict =torch.tensor(__a )
elif key_name.startswith("model/moe" ):
lowerCamelCase__: Tuple =int(key_name[9:].split("/" )[0] )
if key_name.endswith("/switch_gating/kernel" ):
lowerCamelCase__: Optional[Any] ="model.blocks.%d.feed_forward.mlp.router.classifier.weight" % player
lowerCamelCase__: Dict =vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
lowerCamelCase__: List[Any] =torch.tensor(__a )
elif key_name.endswith("/softmlp/kernel" ):
lowerCamelCase__: List[Any] ="model.blocks.%d.feed_forward.soft_bypass_mlp.weight" % player
lowerCamelCase__: Any =vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
lowerCamelCase__: Dict =torch.tensor(__a )
elif key_name.endswith("/wo/kernel" ) or key_name.endswith("/wi/kernel" ):
lowerCamelCase__: int =key_name[-9:-7]
for i in range(16 ):
lowerCamelCase__: Union[str, Any] ="model.blocks.%d.feed_forward.mlp.experts.expert_%d.%s.weight" % (player, i, nlayer)
lowerCamelCase__: List[Any] =(
vnp[i].transpose([1, 0] ).copy()
) # In Mesh-Tensorflow, it is one array, so it is divided
lowerCamelCase__: Tuple =torch.tensor(__a )
elif key_name.startswith("model/mlp" ):
lowerCamelCase__: str =int(key_name[9:].split("/" )[0] )
if key_name.endswith("/p1/kernel" ):
lowerCamelCase__: Optional[int] ="model.blocks.%d.feed_forward.mlp.wi.weight" % player
lowerCamelCase__: Optional[Any] =vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
lowerCamelCase__: int =torch.tensor(__a )
elif key_name.endswith("/p1/bias" ):
lowerCamelCase__: List[Any] ="model.blocks.%d.feed_forward.mlp.wi.bias" % player
lowerCamelCase__: Dict =vnp.copy() # same because it is one dimensional
lowerCamelCase__: Optional[Any] =torch.tensor(__a )
elif key_name.endswith("/p2/kernel" ):
lowerCamelCase__: Dict ="model.blocks.%d.feed_forward.mlp.wo.weight" % player
lowerCamelCase__: Any =vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
lowerCamelCase__: str =torch.tensor(__a )
elif key_name.endswith("/p2/bias" ):
lowerCamelCase__: Any ="model.blocks.%d.feed_forward.mlp.wo.bias" % player
lowerCamelCase__: int =vnp.copy() # same because it is one dimensional
lowerCamelCase__: List[Any] =torch.tensor(__a )
elif key_name.startswith("model/ln" ):
lowerCamelCase__: Dict =int(key_name[8:].split("/" )[0] )
if key_name.endswith("/b" ):
lowerCamelCase__: Tuple ="model.blocks.%d.feed_forward.norm.bias" % player
lowerCamelCase__: str =vnp.copy() # same because it is one dimensional
lowerCamelCase__: List[Any] =torch.tensor(__a )
elif key_name.endswith("/g" ):
lowerCamelCase__: List[Any] ="model.blocks.%d.feed_forward.norm.weight" % player
lowerCamelCase__: Optional[Any] =vnp.copy() # same because it is one dimensional
lowerCamelCase__: Optional[int] =torch.tensor(__a )
elif key_name.startswith("model/att" ):
lowerCamelCase__: Tuple =int(key_name[9:].split("/" )[0] )
if key_name.endswith("/qkv/kernel" ):
lowerCamelCase__: Optional[int] =vnp.copy() # Compute same dimension as Mesh-tensorflow using einsum
lowerCamelCase__: str =state[:, 0, :, :]
lowerCamelCase__: List[str] =state[:, 1, :, :]
lowerCamelCase__: Optional[int] =state[:, 2, :, :]
lowerCamelCase__: Dict =(
state_q.reshape([state_q.shape[0], state_q.shape[1] * state_q.shape[2]] )
.transpose([1, 0] )
.copy()
) # Mesh-Tensorflow is a diagonal matrix
lowerCamelCase__: List[str] =(
state_k.reshape([state_k.shape[0], state_k.shape[1] * state_k.shape[2]] )
.transpose([1, 0] )
.copy()
) # Mesh-Tensorflow is a diagonal matrix
lowerCamelCase__: str =(
state_v.reshape([state_v.shape[0], state_v.shape[1] * state_v.shape[2]] )
.transpose([1, 0] )
.copy()
) # Mesh-Tensorflow is a diagonal matrix
lowerCamelCase__: Optional[int] ="model.blocks.%d.self_attn.self_attn.q_proj.weight" % player
lowerCamelCase__: List[str] =torch.tensor(__a )
lowerCamelCase__: int ="model.blocks.%d.self_attn.self_attn.k_proj.weight" % player
lowerCamelCase__: Optional[int] =torch.tensor(__a )
lowerCamelCase__: List[str] ="model.blocks.%d.self_attn.self_attn.v_proj.weight" % player
lowerCamelCase__: List[str] =torch.tensor(__a )
elif key_name.endswith("/o/kernel" ):
lowerCamelCase__: List[Any] ="model.blocks.%d.self_attn.self_attn.out_proj.weight" % player
lowerCamelCase__: List[str] =(
vnp.reshape([vnp.shape[0] * vnp.shape[1], vnp.shape[2]] ).transpose([1, 0] ).copy()
) # Mesh-Tensorflow is a diagonal matrix
lowerCamelCase__: Union[str, Any] =torch.tensor(__a )
elif key_name.startswith("model/an" ):
lowerCamelCase__: Any =int(key_name[8:].split("/" )[0] )
if key_name.endswith("/b" ):
lowerCamelCase__: Optional[int] ="model.blocks.%d.self_attn.norm.bias" % player
lowerCamelCase__: Tuple =vnp.copy() # same because it is one dimensional
lowerCamelCase__: Any =torch.tensor(__a )
elif key_name.endswith("/g" ):
lowerCamelCase__: Optional[Any] ="model.blocks.%d.self_attn.norm.weight" % player
lowerCamelCase__: Tuple =vnp.copy() # same because it is one dimensional
lowerCamelCase__: Dict =torch.tensor(__a )
elif (
key_name.startswith("model/wte" )
or key_name.startswith("model/wpe" )
or key_name.startswith("model/ete" )
):
lowerCamelCase__: int ={"wte": "embed_tokens", "wpe": "position_embeddings", "ete": "extra_position_embeddings"}[
key_name[-3:]
]
lowerCamelCase__: int ="model.%s.weight" % nlayer
lowerCamelCase__: Optional[Any] =vnp.copy() # same in embedded
lowerCamelCase__: str =torch.tensor(__a )
if key_name.startswith("model/wte" ):
lowerCamelCase__: str ="lm_head.weight"
lowerCamelCase__: Optional[Any] =vnp.copy() # same in embedded
lowerCamelCase__: List[Any] =torch.tensor(__a )
elif key_name.startswith("model/wob" ):
lowerCamelCase__: int ="final_logits_bias"
lowerCamelCase__: Optional[Any] =vnp.copy() # same in embedded
lowerCamelCase__: List[Any] =state.reshape((1, -1) )
lowerCamelCase__: Tuple =torch.tensor(__a )
elif key_name == "model/dense/kernel":
lowerCamelCase__: List[str] ="model.last_project.weight"
lowerCamelCase__: Union[str, Any] =vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
lowerCamelCase__: Optional[Any] =torch.tensor(__a )
elif key_name == "model/dense_1/bias":
lowerCamelCase__: Optional[int] ="model.last_project.bias"
lowerCamelCase__: Union[str, Any] =vnp.copy() # same because it is one dimensional
lowerCamelCase__: Optional[Any] =torch.tensor(__a )
torch.save(__a , args.output )
if __name__ == "__main__":
__A = argparse.ArgumentParser(
description="model converter.", formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
parser.add_argument("--tf_model_dir", metavar="PATH", type=str, required=True, help="import model")
parser.add_argument("--output", metavar="PATH", type=str, required=True, help="output model")
__A = parser.parse_args()
convert_tf_gptsan_to_pt(args)
| 59
|
import math
import time
from typing import Dict, List, Optional
from torch.utils.data import Dataset
from transformers import SeqaSeqTrainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput, speed_metrics
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class _a ( UpperCamelCase__ ):
def __init__( self: int , *UpperCamelCase_: str , UpperCamelCase_: List[str]=None , UpperCamelCase_: int=None , **UpperCamelCase_: Optional[Any] ) -> List[str]:
"""simple docstring"""
super().__init__(*UpperCamelCase_ , **UpperCamelCase_ )
lowercase__ = eval_examples
lowercase__ = post_process_function
def lowerCamelCase_ ( self: List[str] , UpperCamelCase_: Optional[Dataset] = None , UpperCamelCase_: List[Any]=None , UpperCamelCase_: Optional[List[str]] = None , UpperCamelCase_: str = "eval" , **UpperCamelCase_: int , ) -> Dict[str, float]:
"""simple docstring"""
lowercase__ = gen_kwargs.copy()
lowercase__ = (
gen_kwargs['''max_length'''] if gen_kwargs.get('''max_length''' ) is not None else self.args.generation_max_length
)
lowercase__ = (
gen_kwargs['''num_beams'''] if gen_kwargs.get('''num_beams''' ) is not None else self.args.generation_num_beams
)
lowercase__ = gen_kwargs
lowercase__ = self.eval_dataset if eval_dataset is None else eval_dataset
lowercase__ = self.get_eval_dataloader(UpperCamelCase_ )
lowercase__ = self.eval_examples if eval_examples is None else eval_examples
# Temporarily disable metric computation, we will do it in the loop here.
lowercase__ = self.compute_metrics
lowercase__ = None
lowercase__ = time.time()
lowercase__ = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
try:
lowercase__ = eval_loop(
UpperCamelCase_ , description='''Evaluation''' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=UpperCamelCase_ , metric_key_prefix=UpperCamelCase_ , )
finally:
lowercase__ = compute_metrics
lowercase__ = self.args.eval_batch_size * self.args.world_size
if f'{metric_key_prefix}_jit_compilation_time' in output.metrics:
start_time += output.metrics[f'{metric_key_prefix}_jit_compilation_time']
output.metrics.update(
speed_metrics(
UpperCamelCase_ , UpperCamelCase_ , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) )
if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save:
# Only the main node write the results by default
lowercase__ = self.post_process_function(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
lowercase__ = self.compute_metrics(UpperCamelCase_ )
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys() ):
if not key.startswith(f'{metric_key_prefix}_' ):
lowercase__ = metrics.pop(UpperCamelCase_ )
metrics.update(output.metrics )
else:
lowercase__ = output.metrics
if self.args.should_log:
# Only the main node log the results by default
self.log(UpperCamelCase_ )
if self.args.tpu_metrics_debug or self.args.debug:
# tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
xm.master_print(met.metrics_report() )
lowercase__ = self.callback_handler.on_evaluate(self.args , self.state , self.control , UpperCamelCase_ )
return metrics
def lowerCamelCase_ ( self: Dict , UpperCamelCase_: Any , UpperCamelCase_: Tuple , UpperCamelCase_: List[str]=None , UpperCamelCase_: str = "test" , **UpperCamelCase_: Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
lowercase__ = gen_kwargs.copy()
lowercase__ = self.get_test_dataloader(UpperCamelCase_ )
# Temporarily disable metric computation, we will do it in the loop here.
lowercase__ = self.compute_metrics
lowercase__ = None
lowercase__ = time.time()
lowercase__ = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
try:
lowercase__ = eval_loop(
UpperCamelCase_ , description='''Prediction''' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=UpperCamelCase_ , metric_key_prefix=UpperCamelCase_ , )
finally:
lowercase__ = compute_metrics
lowercase__ = self.args.eval_batch_size * self.args.world_size
if f'{metric_key_prefix}_jit_compilation_time' in output.metrics:
start_time += output.metrics[f'{metric_key_prefix}_jit_compilation_time']
output.metrics.update(
speed_metrics(
UpperCamelCase_ , UpperCamelCase_ , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) )
if self.post_process_function is None or self.compute_metrics is None:
return output
lowercase__ = self.post_process_function(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , '''predict''' )
lowercase__ = self.compute_metrics(UpperCamelCase_ )
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys() ):
if not key.startswith(f'{metric_key_prefix}_' ):
lowercase__ = metrics.pop(UpperCamelCase_ )
metrics.update(output.metrics )
return PredictionOutput(predictions=predictions.predictions , label_ids=predictions.label_ids , metrics=UpperCamelCase_ )
| 43
| 0
|
def __SCREAMING_SNAKE_CASE ( lowercase_ ) -> str:
"""simple docstring"""
__UpperCamelCase = len(a_ )
for i in range(length - 1 ):
__UpperCamelCase = i
for k in range(i + 1 , a_ ):
if collection[k] < collection[least]:
__UpperCamelCase = k
if least != i:
__UpperCamelCase = (collection[i], collection[least])
return collection
if __name__ == "__main__":
a_ = input("Enter numbers separated by a comma:\n").strip()
a_ = [int(item) for item in user_input.split(",")]
print(selection_sort(unsorted))
| 705
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a_ = logging.get_logger(__name__)
a_ = {
"RWKV/rwkv-4-169m-pile": "https://huggingface.co/RWKV/rwkv-4-169m-pile/resolve/main/config.json",
"RWKV/rwkv-4-430m-pile": "https://huggingface.co/RWKV/rwkv-4-430m-pile/resolve/main/config.json",
"RWKV/rwkv-4-1b5-pile": "https://huggingface.co/RWKV/rwkv-4-1b5-pile/resolve/main/config.json",
"RWKV/rwkv-4-3b-pile": "https://huggingface.co/RWKV/rwkv-4-3b-pile/resolve/main/config.json",
"RWKV/rwkv-4-7b-pile": "https://huggingface.co/RWKV/rwkv-4-7b-pile/resolve/main/config.json",
"RWKV/rwkv-4-14b-pile": "https://huggingface.co/RWKV/rwkv-4-14b-pile/resolve/main/config.json",
"RWKV/rwkv-raven-1b5": "https://huggingface.co/RWKV/rwkv-raven-1b5/resolve/main/config.json",
"RWKV/rwkv-raven-3b": "https://huggingface.co/RWKV/rwkv-raven-3b/resolve/main/config.json",
"RWKV/rwkv-raven-7b": "https://huggingface.co/RWKV/rwkv-raven-7b/resolve/main/config.json",
"RWKV/rwkv-raven-14b": "https://huggingface.co/RWKV/rwkv-raven-14b/resolve/main/config.json",
}
class _lowerCamelCase ( _SCREAMING_SNAKE_CASE ):
"""simple docstring"""
lowerCAmelCase__ : Union[str, Any] = "rwkv"
lowerCAmelCase__ : Union[str, Any] = {"max_position_embeddings": "context_length"}
def __init__( self : Optional[int] , snake_case : Optional[Any]=50277 , snake_case : str=1024 , snake_case : str=4096 , snake_case : Optional[Any]=32 , snake_case : Union[str, Any]=None , snake_case : Optional[Any]=None , snake_case : Optional[Any]=1E-5 , snake_case : List[str]=0 , snake_case : Optional[Any]=0 , snake_case : Union[str, Any]=6 , snake_case : Tuple=False , snake_case : Any=True , **snake_case : List[str] , ):
__UpperCamelCase = vocab_size
__UpperCamelCase = context_length
__UpperCamelCase = hidden_size
__UpperCamelCase = num_hidden_layers
__UpperCamelCase = attention_hidden_size if attention_hidden_size is not None else hidden_size
__UpperCamelCase = intermediate_size if intermediate_size is not None else 4 * hidden_size
__UpperCamelCase = layer_norm_epsilon
__UpperCamelCase = rescale_every
__UpperCamelCase = use_cache
__UpperCamelCase = bos_token_id
__UpperCamelCase = eos_token_id
super().__init__(
tie_word_embeddings=snake_case , bos_token_id=snake_case , eos_token_id=snake_case , **snake_case )
| 375
| 0
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.