| code (string, 82–53.2k chars) | code_codestyle (int64, 0–721) | style_context (string, 91–41.9k chars) | style_context_codestyle (int64, 0–699) | label (int64, 0–1) |
|---|---|---|---|---|
from typing import Any

import numpy as np


def is_hermitian(matrix: np.ndarray) -> bool:
    """Checks whether a matrix equals its own conjugate transpose."""
    return np.array_equal(matrix, matrix.conjugate().T)


def rayleigh_quotient(a: np.ndarray, v: np.ndarray) -> Any:
    """Returns the Rayleigh quotient (v* A v) / (v* v) of a Hermitian matrix a and vector v."""
    v_star = v.conjugate().T
    v_star_dot = v_star.dot(a)
    assert isinstance(v_star_dot, np.ndarray)
    return (v_star_dot.dot(v)) / (v_star.dot(v))


def tests() -> None:
    a = np.array([[2, 2 + 1j, 4], [2 - 1j, 3, 1j], [4, -1j, 1]])
    v = np.array([[1], [2], [3]])
    assert is_hermitian(a), f"{a} is not hermitian."
    print(rayleigh_quotient(a, v))

    a = np.array([[1, 2, 4], [2, 3, -1], [4, -1, 1]])
    assert is_hermitian(a), f"{a} is not hermitian."
    assert rayleigh_quotient(a, v) == float(3)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    tests()
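A minimal usage sketch (illustrative, not part of the original file; assumes the definitions above are in scope):

import numpy as np

a = np.array([[2.0, 1.0], [1.0, 2.0]])  # real symmetric, hence Hermitian
v = np.array([[1.0], [1.0]])            # eigenvector of a with eigenvalue 3
assert is_hermitian(a)
print(rayleigh_quotient(a, v))          # 1x1 array containing 3.0, the eigenvalue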
| 485
|
def binary_recursive(decimal: int) -> str:
    """Take a positive integer value and return its binary equivalent, digit by digit."""
    decimal = int(decimal)
    if decimal in (0, 1):  # Exit cases for the recursion
        return str(decimal)
    div, mod = divmod(decimal, 2)
    return binary_recursive(div) + str(mod)


def main(number: str) -> str:
    """Validate the input, then return its binary form prefixed with "0b" or "-0b"
    for positive and negative integers respectively."""
    number = str(number).strip()
    if not number:
        raise ValueError("No input value was provided")
    negative = "-" if number.startswith("-") else ""
    number = number.lstrip("-")
    if not number.isnumeric():
        raise ValueError("Input value is not an integer")
    return f"{negative}0b{binary_recursive(int(number))}"


if __name__ == "__main__":
    from doctest import testmod

    testmod()
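A quick check of the recursion (illustrative only):

print(main("7"))    # 0b111: divmod(7, 2) -> (3, 1), divmod(3, 2) -> (1, 1), base case "1"
print(main("-11"))  # -0b1011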
| 485
| 1
|
import os
import tempfile
import unittest

from transformers import is_torch_available
from transformers.testing_utils import require_torch


if is_torch_available():
    import torch
    from torch import nn

    from transformers import (
        Adafactor,
        AdamW,
        get_constant_schedule,
        get_constant_schedule_with_warmup,
        get_cosine_schedule_with_warmup,
        get_cosine_with_hard_restarts_schedule_with_warmup,
        get_inverse_sqrt_schedule,
        get_linear_schedule_with_warmup,
        get_polynomial_decay_schedule_with_warmup,
    )


def unwrap_schedule(scheduler, num_steps=10):
    lrs = []
    for _ in range(num_steps):
        lrs.append(scheduler.get_lr()[0])
        scheduler.step()
    return lrs


def unwrap_and_save_reload_schedule(scheduler, num_steps=10):
    lrs = []
    for step in range(num_steps):
        lrs.append(scheduler.get_lr()[0])
        scheduler.step()
        # halfway through, round-trip the scheduler state through disk
        if step == num_steps // 2:
            with tempfile.TemporaryDirectory() as tmpdirname:
                file_name = os.path.join(tmpdirname, "schedule.bin")
                torch.save(scheduler.state_dict(), file_name)

                state_dict = torch.load(file_name)
                scheduler.load_state_dict(state_dict)
    return lrs


@require_torch
class OptimizationTest(unittest.TestCase):
    def assertListAlmostEqual(self, list1, list2, tol):
        self.assertEqual(len(list1), len(list2))
        for a, b in zip(list1, list2):
            self.assertAlmostEqual(a, b, delta=tol)

    def test_adam_w(self):
        w = torch.tensor([0.1, -0.2, -0.1], requires_grad=True)
        target = torch.tensor([0.4, 0.2, -0.5])
        criterion = nn.MSELoss()
        # No warmup, constant schedule, no gradient clipping
        optimizer = AdamW(params=[w], lr=2e-1, weight_decay=0.0)
        for _ in range(100):
            loss = criterion(w, target)
            loss.backward()
            optimizer.step()
            w.grad.detach_()  # No zero_grad() function on simple tensors. we do it ourselves.
            w.grad.zero_()
        self.assertListAlmostEqual(w.tolist(), [0.4, 0.2, -0.5], tol=1e-2)

    def test_adafactor(self):
        w = torch.tensor([0.1, -0.2, -0.1], requires_grad=True)
        target = torch.tensor([0.4, 0.2, -0.5])
        criterion = nn.MSELoss()
        # No warmup, constant schedule, no gradient clipping
        optimizer = Adafactor(
            params=[w],
            lr=1e-2,
            eps=(1e-30, 1e-3),
            clip_threshold=1.0,
            decay_rate=-0.8,
            beta1=None,
            weight_decay=0.0,
            relative_step=False,
            scale_parameter=False,
            warmup_init=False,
        )
        for _ in range(1000):
            loss = criterion(w, target)
            loss.backward()
            optimizer.step()
            w.grad.detach_()  # No zero_grad() function on simple tensors. we do it ourselves.
            w.grad.zero_()
        self.assertListAlmostEqual(w.tolist(), [0.4, 0.2, -0.5], tol=1e-2)


@require_torch
class ScheduleInitTest(unittest.TestCase):
    m = nn.Linear(50, 50) if is_torch_available() else None
    optimizer = AdamW(m.parameters(), lr=10.0) if is_torch_available() else None
    num_steps = 10

    def assertListAlmostEqual(self, list1, list2, tol, msg=None):
        self.assertEqual(len(list1), len(list2))
        for a, b in zip(list1, list2):
            self.assertAlmostEqual(a, b, delta=tol, msg=msg)

    def test_schedulers(self):
        common_kwargs = {"num_warmup_steps": 2, "num_training_steps": 10}
        # scheduler dict format
        # function: (sched_args_dict, expected_learning_rates)
        scheds = {
            get_constant_schedule: ({}, [10.0] * self.num_steps),
            get_constant_schedule_with_warmup: (
                {"num_warmup_steps": 4},
                [0.0, 2.5, 5.0, 7.5, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0],
            ),
            get_linear_schedule_with_warmup: (
                {**common_kwargs},
                [0.0, 5.0, 10.0, 8.75, 7.5, 6.25, 5.0, 3.75, 2.5, 1.25],
            ),
            get_cosine_schedule_with_warmup: (
                {**common_kwargs},
                [0.0, 5.0, 10.0, 9.61, 8.53, 6.91, 5.0, 3.08, 1.46, 0.38],
            ),
            get_cosine_with_hard_restarts_schedule_with_warmup: (
                {**common_kwargs, "num_cycles": 2},
                [0.0, 5.0, 10.0, 8.53, 5.0, 1.46, 10.0, 8.53, 5.0, 1.46],
            ),
            get_polynomial_decay_schedule_with_warmup: (
                {**common_kwargs, "power": 2.0, "lr_end": 1e-7},
                [0.0, 5.0, 10.0, 7.656, 5.625, 3.906, 2.5, 1.406, 0.625, 0.156],
            ),
            get_inverse_sqrt_schedule: (
                {"num_warmup_steps": 2},
                [0.0, 5.0, 10.0, 8.165, 7.071, 6.325, 5.774, 5.345, 5.0, 4.714],
            ),
        }

        for scheduler_func, data in scheds.items():
            kwargs, expected_learning_rates = data

            scheduler = scheduler_func(self.optimizer, **kwargs)
            self.assertEqual(len([scheduler.get_lr()[0]]), 1)
            lrs_1 = unwrap_schedule(scheduler, self.num_steps)
            self.assertListAlmostEqual(
                lrs_1,
                expected_learning_rates,
                tol=1e-2,
                msg=f"failed for {scheduler_func} in normal scheduler",
            )

            scheduler = scheduler_func(self.optimizer, **kwargs)
            if scheduler_func.__name__ != "get_constant_schedule":
                LambdaScheduleWrapper.wrap_scheduler(scheduler)  # wrap to test picklability of the schedule
            lrs_2 = unwrap_and_save_reload_schedule(scheduler, self.num_steps)
            self.assertListEqual(lrs_1, lrs_2, msg=f"failed for {scheduler_func} in save and reload")


class LambdaScheduleWrapper:
    """Picklable wrapper around a schedule lambda function."""

    def __init__(self, fn):
        self.fn = fn

    def __call__(self, *args, **kwargs):
        return self.fn(*args, **kwargs)

    @classmethod
    def wrap_scheduler(cls, scheduler):
        scheduler.lr_lambdas = list(map(cls, scheduler.lr_lambdas))
| 90
|
from __future__ import annotations


def all_unique(input_list: list[int]) -> bool:
    """Returns True when the list contains no duplicate elements."""
    return len(set(input_list)) == len(input_list)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
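Two illustrative calls (the name all_unique above is reconstructed from the logic):

print(all_unique([1, 2, 3]))     # True
print(all_unique([1, 2, 2, 3]))  # False, since the set collapses the duplicate 2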
| 90
| 1
|
import multiprocessing
import os
from typing import BinaryIO, Optional, Union

import fsspec

from .. import Dataset, Features, NamedSplit, config
from ..formatting import query_table
from ..packaged_modules.json.json import Json
from ..utils import logging
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader


class JsonDatasetReader(AbstractDatasetReader):
    def __init__(self, path_or_paths: NestedDataStructureLike[PathLike], split: Optional[NamedSplit] = None, features: Optional[Features] = None, cache_dir: str = None, keep_in_memory: bool = False, streaming: bool = False, field: Optional[str] = None, num_proc: Optional[int] = None, **kwargs):
        super().__init__(
            path_or_paths,
            split=split,
            features=features,
            cache_dir=cache_dir,
            keep_in_memory=keep_in_memory,
            streaming=streaming,
            num_proc=num_proc,
            **kwargs,
        )
        self.field = field
        path_or_paths = path_or_paths if isinstance(path_or_paths, dict) else {self.split: path_or_paths}
        self.builder = Json(
            cache_dir=cache_dir,
            data_files=path_or_paths,
            features=features,
            field=field,
            **kwargs,
        )

    def read(self):
        # Build iterable dataset
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split=self.split)
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None

            self.builder.download_and_prepare(
                download_config=download_config,
                download_mode=download_mode,
                verification_mode=verification_mode,
                base_path=base_path,
                num_proc=self.num_proc,
            )
            dataset = self.builder.as_dataset(
                split=self.split, verification_mode=verification_mode, in_memory=self.keep_in_memory
            )
        return dataset


class JsonDatasetWriter:
    def __init__(self, dataset: Dataset, path_or_buf: Union[PathLike, BinaryIO], batch_size: Optional[int] = None, num_proc: Optional[int] = None, **to_json_kwargs):
        if num_proc is not None and num_proc <= 0:
            raise ValueError(f"num_proc {num_proc} must be an integer > 0.")

        self.dataset = dataset
        self.path_or_buf = path_or_buf
        self.batch_size = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
        self.num_proc = num_proc
        self.encoding = "utf-8"
        self.to_json_kwargs = to_json_kwargs

    def write(self) -> int:
        _ = self.to_json_kwargs.pop("path_or_buf", None)
        orient = self.to_json_kwargs.pop("orient", "records")
        lines = self.to_json_kwargs.pop("lines", True if orient == "records" else False)
        index = self.to_json_kwargs.pop("index", False if orient in ["split", "table"] else True)
        compression = self.to_json_kwargs.pop("compression", None)

        if compression not in [None, "infer", "gzip", "bz2", "xz"]:
            raise NotImplementedError(f"`datasets` currently does not support {compression} compression")

        if isinstance(self.path_or_buf, (str, bytes, os.PathLike)):
            with fsspec.open(self.path_or_buf, "wb", compression=compression) as buffer:
                written = self._write(file_obj=buffer, orient=orient, lines=lines, index=index, **self.to_json_kwargs)
        else:
            if compression:
                raise NotImplementedError(
                    f"The compression parameter is not supported when writing to a buffer, but compression={compression}"
                    " was passed. Please provide a local path instead."
                )
            written = self._write(
                file_obj=self.path_or_buf, orient=orient, lines=lines, index=index, **self.to_json_kwargs
            )
        return written

    def _batch_json(self, args) -> bytes:
        offset, orient, lines, index, to_json_kwargs = args

        batch = query_table(
            table=self.dataset.data,
            key=slice(offset, offset + self.batch_size),
            indices=self.dataset._indices,
        )
        json_str = batch.to_pandas().to_json(path_or_buf=None, orient=orient, lines=lines, index=index, **to_json_kwargs)
        if not json_str.endswith("\n"):
            json_str += "\n"
        return json_str.encode(self.encoding)

    def _write(self, file_obj: BinaryIO, orient, lines, index, **to_json_kwargs) -> int:
        """Writes the pyarrow table as JSON lines to a binary file handle, batch by batch."""
        written = 0

        if self.num_proc is None or self.num_proc == 1:
            for offset in logging.tqdm(
                range(0, len(self.dataset), self.batch_size),
                unit="ba",
                disable=not logging.is_progress_bar_enabled(),
                desc="Creating json from Arrow format",
            ):
                json_str = self._batch_json((offset, orient, lines, index, to_json_kwargs))
                written += file_obj.write(json_str)
        else:
            num_rows, batch_size = len(self.dataset), self.batch_size
            with multiprocessing.Pool(self.num_proc) as pool:
                for json_str in logging.tqdm(
                    pool.imap(
                        self._batch_json,
                        [(offset, orient, lines, index, to_json_kwargs) for offset in range(0, num_rows, batch_size)],
                    ),
                    total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size,
                    unit="ba",
                    disable=not logging.is_progress_bar_enabled(),
                    desc="Creating json from Arrow format",
                ):
                    written += file_obj.write(json_str)

        return written
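A hedged end-to-end sketch; in the `datasets` library these classes sit behind `Dataset.to_json` and `Dataset.from_json`:

from datasets import Dataset

ds = Dataset.from_dict({"a": [1, 2, 3]})
ds.to_json("out.jsonl")                    # JsonDatasetWriter does the batched Arrow -> JSON-lines work
reloaded = Dataset.from_json("out.jsonl")  # JsonDatasetReader builds the dataset back from the file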
| 369
|
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


if TYPE_CHECKING:
    from ... import FeatureExtractionMixin, TensorType

logger = logging.get_logger(__name__)

IMAGEGPT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "openai/imagegpt-small": "",
    "openai/imagegpt-medium": "",
    "openai/imagegpt-large": "",
}


class ImageGPTConfig(PretrainedConfig):
    model_type = "imagegpt"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "n_embd",
        "max_position_embeddings": "n_positions",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(self, vocab_size=512 + 1, n_positions=32 * 32, n_embd=512, n_layer=24, n_head=8, n_inner=None, activation_function="quick_gelu", resid_pdrop=0.1, embd_pdrop=0.1, attn_pdrop=0.1, layer_norm_epsilon=1e-5, initializer_range=0.02, scale_attn_weights=True, use_cache=True, tie_word_embeddings=False, scale_attn_by_inverse_layer_idx=False, reorder_and_upcast_attn=False, **kwargs):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.scale_attn_weights = scale_attn_weights
        self.use_cache = use_cache
        self.scale_attn_by_inverse_layer_idx = scale_attn_by_inverse_layer_idx
        self.reorder_and_upcast_attn = reorder_and_upcast_attn
        self.tie_word_embeddings = tie_word_embeddings

        super().__init__(tie_word_embeddings=tie_word_embeddings, **kwargs)


class ImageGPTOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("input_ids", {0: "batch", 1: "sequence"}),
            ]
        )

    def generate_dummy_inputs(self, preprocessor: "FeatureExtractionMixin", batch_size: int = 1, seq_length: int = -1, is_pair: bool = False, framework: Optional["TensorType"] = None, num_channels: int = 3, image_width: int = 32, image_height: int = 32) -> Mapping[str, Any]:
        input_image = self._generate_dummy_images(batch_size, num_channels, image_height, image_width)
        inputs = dict(preprocessor(images=input_image, return_tensors=framework))
        return inputs
| 472
| 0
|
def greatest_common_divisor(x: int, y: int) -> int:
    """Euclid's algorithm for the greatest common divisor."""
    return x if y == 0 else greatest_common_divisor(y, x % y)


def lcm(x: int, y: int) -> int:
    """Least common multiple, computed via the gcd."""
    return (x * y) // greatest_common_divisor(x, y)


def solution(n: int = 20) -> int:
    """Returns the smallest positive number evenly divisible by all of the numbers from 1 to n."""
    g = 1
    for i in range(1, n + 1):
        g = lcm(g, i)
    return g


if __name__ == "__main__":
    print(f"{solution() = }")
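A worked check (illustrative):

print(solution(10))  # 2520 = lcm(1, ..., 10), the example given in Project Euler problem 5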
| 580
|
import os
from typing import BinaryIO, Optional, Union

import numpy as np
import pyarrow.parquet as pq

from .. import Audio, Dataset, Features, Image, NamedSplit, Value, config
from ..features.features import FeatureType, _visit
from ..formatting import query_table
from ..packaged_modules import _PACKAGED_DATASETS_MODULES
from ..packaged_modules.parquet.parquet import Parquet
from ..utils import logging
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader


def get_writer_batch_size(features: Features) -> Optional[int]:
    """Returns the row group size to use for this feature set, or None to keep the default."""
    batch_size = np.inf

    def set_batch_size(feature: FeatureType) -> None:
        nonlocal batch_size
        if isinstance(feature, Image):
            batch_size = min(batch_size, config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS)
        elif isinstance(feature, Audio):
            batch_size = min(batch_size, config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS)
        elif isinstance(feature, Value) and feature.dtype == "binary":
            batch_size = min(batch_size, config.PARQUET_ROW_GROUP_SIZE_FOR_BINARY_DATASETS)

    _visit(features, set_batch_size)

    return None if batch_size is np.inf else batch_size


class ParquetDatasetReader(AbstractDatasetReader):
    def __init__(self, path_or_paths: NestedDataStructureLike[PathLike], split: Optional[NamedSplit] = None, features: Optional[Features] = None, cache_dir: str = None, keep_in_memory: bool = False, streaming: bool = False, num_proc: Optional[int] = None, **kwargs):
        super().__init__(
            path_or_paths,
            split=split,
            features=features,
            cache_dir=cache_dir,
            keep_in_memory=keep_in_memory,
            streaming=streaming,
            num_proc=num_proc,
            **kwargs,
        )
        path_or_paths = path_or_paths if isinstance(path_or_paths, dict) else {self.split: path_or_paths}
        hash = _PACKAGED_DATASETS_MODULES["parquet"][1]
        self.builder = Parquet(
            cache_dir=cache_dir,
            data_files=path_or_paths,
            features=features,
            hash=hash,
            **kwargs,
        )

    def read(self):
        # Build iterable dataset
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split=self.split)
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None

            self.builder.download_and_prepare(
                download_config=download_config,
                download_mode=download_mode,
                verification_mode=verification_mode,
                base_path=base_path,
                num_proc=self.num_proc,
            )
            dataset = self.builder.as_dataset(
                split=self.split, verification_mode=verification_mode, in_memory=self.keep_in_memory
            )
        return dataset


class ParquetDatasetWriter:
    def __init__(self, dataset: Dataset, path_or_buf: Union[PathLike, BinaryIO], batch_size: Optional[int] = None, **parquet_writer_kwargs):
        self.dataset = dataset
        self.path_or_buf = path_or_buf
        self.batch_size = batch_size or get_writer_batch_size(dataset.features)
        self.parquet_writer_kwargs = parquet_writer_kwargs

    def write(self) -> int:
        batch_size = self.batch_size if self.batch_size else config.DEFAULT_MAX_BATCH_SIZE

        if isinstance(self.path_or_buf, (str, bytes, os.PathLike)):
            with open(self.path_or_buf, "wb+") as buffer:
                written = self._write(file_obj=buffer, batch_size=batch_size, **self.parquet_writer_kwargs)
        else:
            written = self._write(file_obj=self.path_or_buf, batch_size=batch_size, **self.parquet_writer_kwargs)
        return written

    def _write(self, file_obj: BinaryIO, batch_size: int, **parquet_writer_kwargs) -> int:
        """Writes the pyarrow table as Parquet to a binary file handle, batch by batch."""
        written = 0
        _ = parquet_writer_kwargs.pop("path_or_buf", None)
        schema = self.dataset.features.arrow_schema

        writer = pq.ParquetWriter(file_obj, schema=schema, **parquet_writer_kwargs)

        for offset in logging.tqdm(
            range(0, len(self.dataset), batch_size),
            unit="ba",
            disable=not logging.is_progress_bar_enabled(),
            desc="Creating parquet from Arrow format",
        ):
            batch = query_table(
                table=self.dataset._data,
                key=slice(offset, offset + batch_size),
                indices=self.dataset._indices if self.dataset._indices is not None else None,
            )
            writer.write_table(batch)
            written += batch.nbytes
        writer.close()
        return written
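A hedged usage sketch; these classes sit behind `Dataset.to_parquet` and `Dataset.from_parquet`:

from datasets import Dataset

ds = Dataset.from_dict({"a": [1, 2, 3]})
ds.to_parquet("out.parquet")                    # ParquetDatasetWriter streams Arrow batches through pq.ParquetWriter
reloaded = Dataset.from_parquet("out.parquet")  # ParquetDatasetReader rebuilds the dataset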
| 580
| 1
|
import os
from typing import Any, Callable, Dict, List, Optional, Tuple, Union

import torch
from torch import nn

from ...models.controlnet import ControlNetModel, ControlNetOutput
from ...models.modeling_utils import ModelMixin
from ...utils import logging


logger = logging.get_logger(__name__)


class MultiControlNetModel(ModelMixin):
    """Wrapper that runs several ControlNet models and merges their residuals."""

    def __init__(self, controlnets: Union[List[ControlNetModel], Tuple[ControlNetModel]]):
        super().__init__()
        self.nets = nn.ModuleList(controlnets)

    def forward(self, sample: torch.FloatTensor, timestep: Union[torch.Tensor, float, int], encoder_hidden_states: torch.Tensor, controlnet_cond: List[torch.Tensor], conditioning_scale: List[float], class_labels: Optional[torch.Tensor] = None, timestep_cond: Optional[torch.Tensor] = None, attention_mask: Optional[torch.Tensor] = None, cross_attention_kwargs: Optional[Dict[str, Any]] = None, guess_mode: bool = False, return_dict: bool = True) -> Union[ControlNetOutput, Tuple]:
        for i, (image, scale, controlnet) in enumerate(zip(controlnet_cond, conditioning_scale, self.nets)):
            down_samples, mid_sample = controlnet(
                sample=sample,
                timestep=timestep,
                encoder_hidden_states=encoder_hidden_states,
                controlnet_cond=image,
                conditioning_scale=scale,
                class_labels=class_labels,
                timestep_cond=timestep_cond,
                attention_mask=attention_mask,
                cross_attention_kwargs=cross_attention_kwargs,
                guess_mode=guess_mode,
                return_dict=return_dict,
            )

            # merge samples
            if i == 0:
                down_block_res_samples, mid_block_res_sample = down_samples, mid_sample
            else:
                down_block_res_samples = [
                    samples_prev + samples_curr
                    for samples_prev, samples_curr in zip(down_block_res_samples, down_samples)
                ]
                mid_block_res_sample += mid_sample

        return down_block_res_samples, mid_block_res_sample

    def save_pretrained(self, save_directory: Union[str, os.PathLike], is_main_process: bool = True, save_function: Callable = None, safe_serialization: bool = False, variant: Optional[str] = None):
        idx = 0
        model_path_to_save = save_directory
        for controlnet in self.nets:
            controlnet.save_pretrained(
                model_path_to_save,
                is_main_process=is_main_process,
                save_function=save_function,
                safe_serialization=safe_serialization,
                variant=variant,
            )

            idx += 1
            model_path_to_save = model_path_to_save + f"_{idx}"

    @classmethod
    def from_pretrained(cls, pretrained_model_path: Optional[Union[str, os.PathLike]], **kwargs):
        idx = 0
        controlnets = []

        # load controlnet and append to list until no controlnet directory exists anymore
        # first controlnet has to be saved under `./mydirectory/controlnet` to be compliant with `DiffusionPipeline.from_pretrained`
        # second, third, ... controlnets have to be saved under `./mydirectory/controlnet_1`, `./mydirectory/controlnet_2`, ...
        model_path_to_load = pretrained_model_path
        while os.path.isdir(model_path_to_load):
            controlnet = ControlNetModel.from_pretrained(model_path_to_load, **kwargs)
            controlnets.append(controlnet)

            idx += 1
            model_path_to_load = pretrained_model_path + f"_{idx}"

        logger.info(f"{len(controlnets)} controlnets loaded from {pretrained_model_path}.")

        if len(controlnets) == 0:
            raise ValueError(
                f"No ControlNets found under {os.path.dirname(pretrained_model_path)}. Expected at least {pretrained_model_path + '_0'}."
            )

        return cls(controlnets)
| 537
|
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_distilbert import DistilBertTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"distilbert-base-uncased": "https://huggingface.co/distilbert-base-uncased/resolve/main/vocab.txt",
"distilbert-base-uncased-distilled-squad": (
"https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/vocab.txt"
),
"distilbert-base-cased": "https://huggingface.co/distilbert-base-cased/resolve/main/vocab.txt",
"distilbert-base-cased-distilled-squad": (
"https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/vocab.txt"
),
"distilbert-base-german-cased": "https://huggingface.co/distilbert-base-german-cased/resolve/main/vocab.txt",
"distilbert-base-multilingual-cased": (
"https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"distilbert-base-uncased": "https://huggingface.co/distilbert-base-uncased/resolve/main/tokenizer.json",
"distilbert-base-uncased-distilled-squad": (
"https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/tokenizer.json"
),
"distilbert-base-cased": "https://huggingface.co/distilbert-base-cased/resolve/main/tokenizer.json",
"distilbert-base-cased-distilled-squad": (
"https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/tokenizer.json"
),
"distilbert-base-german-cased": (
"https://huggingface.co/distilbert-base-german-cased/resolve/main/tokenizer.json"
),
"distilbert-base-multilingual-cased": (
"https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/tokenizer.json"
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "distilbert-base-uncased": 512,
    "distilbert-base-uncased-distilled-squad": 512,
    "distilbert-base-cased": 512,
    "distilbert-base-cased-distilled-squad": 512,
    "distilbert-base-german-cased": 512,
    "distilbert-base-multilingual-cased": 512,
}
PRETRAINED_INIT_CONFIGURATION = {
"distilbert-base-uncased": {"do_lower_case": True},
"distilbert-base-uncased-distilled-squad": {"do_lower_case": True},
"distilbert-base-cased": {"do_lower_case": False},
"distilbert-base-cased-distilled-squad": {"do_lower_case": False},
"distilbert-base-german-cased": {"do_lower_case": False},
"distilbert-base-multilingual-cased": {"do_lower_case": False},
}
class DistilBertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = DistilBertTokenizer

    def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=True, unk_token="[UNK]", sep_token="[SEP]", pad_token="[PAD]", cls_token="[CLS]", mask_token="[MASK]", tokenize_chinese_chars=True, strip_accents=None, **kwargs):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        # Keep the backend normalizer in sync with the arguments passed here.
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1 is not None:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
| 28
| 0
|
import math


def prime_sieve(n: int) -> list:
    """Odd-only sieve of Eratosthenes; returns all primes below n."""
    is_prime = [True] * n
    is_prime[0] = False
    is_prime[1] = False
    is_prime[2] = True

    for i in range(3, int(n**0.5 + 1), 2):
        index = i * 2
        while index < n:
            is_prime[index] = False
            index = index + i

    primes = [2]

    for i in range(3, n, 2):
        if is_prime[i]:
            primes.append(i)

    return primes


def solution(limit: int = 999_966_663_333) -> int:
    """Project Euler 234: sum of all semidivisible numbers not exceeding the limit."""
    primes_upper_bound = math.floor(math.sqrt(limit)) + 100
    primes = prime_sieve(primes_upper_bound)

    matches_sum = 0
    prime_index = 0
    last_prime = primes[prime_index]

    while (last_prime**2) <= limit:
        next_prime = primes[prime_index + 1]

        lower_bound = last_prime**2
        upper_bound = next_prime**2

        # Get numbers divisible by lps(current)
        current = lower_bound + last_prime
        while upper_bound > current <= limit:
            matches_sum += current
            current += last_prime

        # Reset the upper_bound
        while (upper_bound - next_prime) > limit:
            upper_bound -= next_prime

        # Add the numbers divisible by ups(current)
        current = upper_bound - next_prime
        while current > lower_bound:
            matches_sum += current
            current -= next_prime

        # Remove the numbers divisible by both ups and lps
        current = 0
        while upper_bound > current <= limit:
            if current <= lower_bound:
                # Increment the current number
                current += last_prime * next_prime
                continue

            if current > limit:
                break

            # Remove twice since it was added by both ups and lps
            matches_sum -= current * 2

            # Increment the current number
            current += last_prime * next_prime

        # Setup for next pair
        last_prime = next_prime
        prime_index += 1

    return matches_sum


if __name__ == "__main__":
    print(solution())
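A quick sanity check of the sieve helper (illustrative):

print(prime_sieve(30))  # [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]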
| 711
|
import functools
import logging
import os
import sys
import threading
from logging import (
    CRITICAL,  # NOQA
    DEBUG,  # NOQA
    ERROR,  # NOQA
    FATAL,  # NOQA
    INFO,  # NOQA
    NOTSET,  # NOQA
    WARN,  # NOQA
    WARNING,  # NOQA
)
from typing import Optional

import huggingface_hub.utils as hf_hub_utils
from tqdm import auto as tqdm_lib


_lock = threading.Lock()
_default_handler: Optional[logging.Handler] = None

log_levels = {
    "debug": logging.DEBUG,
    "info": logging.INFO,
    "warning": logging.WARNING,
    "error": logging.ERROR,
    "critical": logging.CRITICAL,
}

_default_log_level = logging.WARNING

_tqdm_active = True


def _get_default_logging_level():
    """Returns the level set via TRANSFORMERS_VERBOSITY if valid, else the module default."""
    env_level_str = os.getenv("TRANSFORMERS_VERBOSITY", None)
    if env_level_str:
        if env_level_str in log_levels:
            return log_levels[env_level_str]
        else:
            logging.getLogger().warning(
                f"Unknown option TRANSFORMERS_VERBOSITY={env_level_str}, "
                f"has to be one of: { ', '.join(log_levels.keys()) }"
            )
    return _default_log_level


def _get_library_name() -> str:
    return __name__.split(".")[0]


def _get_library_root_logger() -> logging.Logger:
    return logging.getLogger(_get_library_name())


def _configure_library_root_logger() -> None:
    global _default_handler

    with _lock:
        if _default_handler:
            # This library has already configured the library root logger.
            return
        _default_handler = logging.StreamHandler()  # Set sys.stderr as stream.
        _default_handler.flush = sys.stderr.flush

        # Apply our default configuration to the library root logger.
        library_root_logger = _get_library_root_logger()
        library_root_logger.addHandler(_default_handler)
        library_root_logger.setLevel(_get_default_logging_level())
        library_root_logger.propagate = False


def _reset_library_root_logger() -> None:
    global _default_handler

    with _lock:
        if not _default_handler:
            return

        library_root_logger = _get_library_root_logger()
        library_root_logger.removeHandler(_default_handler)
        library_root_logger.setLevel(logging.NOTSET)
        _default_handler = None


def get_log_levels_dict():
    return log_levels


def get_logger(name: Optional[str] = None) -> logging.Logger:
    if name is None:
        name = _get_library_name()

    _configure_library_root_logger()
    return logging.getLogger(name)


def get_verbosity() -> int:
    _configure_library_root_logger()
    return _get_library_root_logger().getEffectiveLevel()


def set_verbosity(verbosity: int) -> None:
    _configure_library_root_logger()
    _get_library_root_logger().setLevel(verbosity)


def set_verbosity_info():
    return set_verbosity(INFO)


def set_verbosity_warning():
    return set_verbosity(WARNING)


def set_verbosity_debug():
    return set_verbosity(DEBUG)


def set_verbosity_error():
    return set_verbosity(ERROR)


def disable_default_handler() -> None:
    _configure_library_root_logger()

    assert _default_handler is not None
    _get_library_root_logger().removeHandler(_default_handler)


def enable_default_handler() -> None:
    _configure_library_root_logger()

    assert _default_handler is not None
    _get_library_root_logger().addHandler(_default_handler)


def add_handler(handler: logging.Handler) -> None:
    _configure_library_root_logger()

    assert handler is not None
    _get_library_root_logger().addHandler(handler)


def remove_handler(handler: logging.Handler) -> None:
    _configure_library_root_logger()

    assert handler is not None and handler not in _get_library_root_logger().handlers
    _get_library_root_logger().removeHandler(handler)


def disable_propagation() -> None:
    _configure_library_root_logger()
    _get_library_root_logger().propagate = False


def enable_propagation() -> None:
    _configure_library_root_logger()
    _get_library_root_logger().propagate = True


def enable_explicit_format() -> None:
    handlers = _get_library_root_logger().handlers

    for handler in handlers:
        formatter = logging.Formatter("[%(levelname)s|%(filename)s:%(lineno)s] %(asctime)s >> %(message)s")
        handler.setFormatter(formatter)


def reset_format() -> None:
    handlers = _get_library_root_logger().handlers

    for handler in handlers:
        handler.setFormatter(None)


def warning_advice(self, *args, **kwargs):
    """Like logger.warning(), but silenced by TRANSFORMERS_NO_ADVISORY_WARNINGS."""
    no_advisory_warnings = os.getenv("TRANSFORMERS_NO_ADVISORY_WARNINGS", False)
    if no_advisory_warnings:
        return
    self.warning(*args, **kwargs)


logging.Logger.warning_advice = warning_advice


@functools.lru_cache(None)
def warning_once(self, *args, **kwargs):
    """Like logger.warning(), but each distinct message is emitted only once."""
    self.warning(*args, **kwargs)


logging.Logger.warning_once = warning_once


class EmptyTqdm:
    """Dummy tqdm that does nothing."""

    def __init__(self, *args, **kwargs):  # pylint: disable=unused-argument
        self._iterator = args[0] if args else None

    def __iter__(self):
        return iter(self._iterator)

    def __getattr__(self, _):
        """Return an empty function for any attribute access."""

        def empty_fn(*args, **kwargs):  # pylint: disable=unused-argument
            return

        return empty_fn

    def __enter__(self):
        return self

    def __exit__(self, type_, value, traceback):
        return


class _tqdm_cls:
    def __call__(self, *args, **kwargs):
        if _tqdm_active:
            return tqdm_lib.tqdm(*args, **kwargs)
        else:
            return EmptyTqdm(*args, **kwargs)

    def set_lock(self, *args, **kwargs):
        self._lock = None
        if _tqdm_active:
            return tqdm_lib.tqdm.set_lock(*args, **kwargs)

    def get_lock(self):
        if _tqdm_active:
            return tqdm_lib.tqdm.get_lock()


tqdm = _tqdm_cls()


def is_progress_bar_enabled() -> bool:
    global _tqdm_active
    return bool(_tqdm_active)


def enable_progress_bars() -> None:
    global _tqdm_active
    _tqdm_active = True
    hf_hub_utils.enable_progress_bars()


def disable_progress_bars() -> None:
    global _tqdm_active
    _tqdm_active = False
    hf_hub_utils.disable_progress_bars()
| 647
| 0
|
"""simple docstring"""
from __future__ import annotations
def lowerCamelCase_ ( UpperCAmelCase_ ) ->int:
"""simple docstring"""
for i in range(1 , len(matrix[0] ) ):
matrix[0][i] += matrix[0][i - 1]
# preprocessing the first column
for i in range(1 , len(UpperCAmelCase_ ) ):
matrix[i][0] += matrix[i - 1][0]
# updating the path cost for current position
for i in range(1 , len(UpperCAmelCase_ ) ):
for j in range(1 , len(matrix[0] ) ):
matrix[i][j] += min(matrix[i - 1][j] , matrix[i][j - 1] )
return matrix[-1][-1]
if __name__ == "__main__":
import doctest
doctest.testmod()
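A worked example (illustrative; note the function mutates its argument):

grid = [[1, 3, 1], [1, 5, 1], [4, 2, 1]]
print(minimum_cost_path(grid))  # 7, following the path 1 -> 3 -> 1 -> 1 -> 1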
| 522
|
"""simple docstring"""
import functools
import gc
import inspect
import torch
from .imports import is_npu_available, is_xpu_available
def lowerCamelCase_ ( *UpperCAmelCase_ ) ->Optional[int]:
"""simple docstring"""
if not isinstance(UpperCAmelCase_ , UpperCAmelCase_ ):
__UpperCAmelCase : List[Any] = list(UpperCAmelCase_ )
for i in range(len(UpperCAmelCase_ ) ):
__UpperCAmelCase : Optional[Any] = None
gc.collect()
if is_xpu_available():
torch.xpu.empty_cache()
elif is_npu_available():
torch.npu.empty_cache()
else:
torch.cuda.empty_cache()
return objects
def lowerCamelCase_ ( UpperCAmelCase_ ) ->bool:
"""simple docstring"""
__UpperCAmelCase : Optional[int] = [
'''CUDA out of memory.''', # CUDA OOM
'''cuDNN error: CUDNN_STATUS_NOT_SUPPORTED.''', # CUDNN SNAFU
'''DefaultCPUAllocator: can\'t allocate memory''', # CPU OOM
]
if isinstance(UpperCAmelCase_ , UpperCAmelCase_ ) and len(exception.args ) == 1:
return any(err in exception.args[0] for err in _statements )
return False
def lowerCamelCase_ ( UpperCAmelCase_ = None , UpperCAmelCase_ = 1_28 ) ->str:
"""simple docstring"""
if function is None:
return functools.partial(UpperCAmelCase_ , starting_batch_size=UpperCAmelCase_ )
__UpperCAmelCase : List[str] = starting_batch_size
def decorator(*UpperCAmelCase_ , **UpperCAmelCase_ ):
nonlocal batch_size
gc.collect()
if is_xpu_available():
torch.xpu.empty_cache()
elif is_npu_available():
torch.npu.empty_cache()
else:
torch.cuda.empty_cache()
__UpperCAmelCase : Optional[int] = list(inspect.signature(UpperCAmelCase_ ).parameters.keys() )
# Guard against user error
if len(UpperCAmelCase_ ) < (len(UpperCAmelCase_ ) + 1):
__UpperCAmelCase : Dict = ''', '''.join([f'''{arg}={value}''' for arg, value in zip(params[1:] , args[1:] )] )
raise TypeError(
f'''Batch size was passed into `{function.__name__}` as the first argument when called.'''
f'''Remove this as the decorator already does so: `{function.__name__}({arg_str})`''' )
while True:
if batch_size == 0:
raise RuntimeError('''No executable batch size found, reached zero.''' )
try:
return function(UpperCAmelCase_ , *UpperCAmelCase_ , **UpperCAmelCase_ )
except Exception as e:
if should_reduce_batch_size(UpperCAmelCase_ ):
gc.collect()
if is_xpu_available():
torch.xpu.empty_cache()
elif is_npu_available():
torch.npu.empty_cache()
else:
torch.cuda.empty_cache()
batch_size //= 2
else:
raise
return decorator
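A hedged usage sketch of the decorator (mirrors the pattern documented for accelerate; training_loop is a hypothetical function):

@find_executable_batch_size(starting_batch_size=128)
def training_loop(batch_size):
    # On a recognized OOM error, the wrapper halves batch_size and retries.
    print(f"trying batch_size={batch_size}")

training_loop()  # batch_size is injected by the decorator, not passed by the caller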
| 522
| 1
|
import tempfile
import unittest

import numpy as np

from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError

from transformers import BertConfig, is_flax_available
from transformers.testing_utils import TOKEN, USER, is_staging_test, require_flax


if is_flax_available():
    import os

    from flax.core.frozen_dict import unfreeze
    from flax.traverse_util import flatten_dict

    from transformers import FlaxBertModel

    os.environ["XLA_PYTHON_CLIENT_MEM_FRACTION"] = "0.12"  # assumed parallelism: 8


@require_flax
@is_staging_test
class FlaxModelPushToHubTester(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)

    @classmethod
    def tearDownClass(cls):
        try:
            delete_repo(token=cls._token, repo_id="test-model-flax")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="valid_org/test-model-flax-org")
        except HTTPError:
            pass

    def test_push_to_hub(self):
        config = BertConfig(
            vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37
        )
        model = FlaxBertModel(config)
        model.push_to_hub("test-model-flax", use_auth_token=self._token)

        new_model = FlaxBertModel.from_pretrained(f"{USER}/test-model-flax")

        base_params = flatten_dict(unfreeze(model.params))
        new_params = flatten_dict(unfreeze(new_model.params))

        for key in base_params.keys():
            max_diff = (base_params[key] - new_params[key]).sum().item()
            self.assertLessEqual(max_diff, 1e-3, msg=f"{key} not identical")

        # Reset repo
        delete_repo(token=self._token, repo_id="test-model-flax")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(tmp_dir, repo_id="test-model-flax", push_to_hub=True, use_auth_token=self._token)

        new_model = FlaxBertModel.from_pretrained(f"{USER}/test-model-flax")

        base_params = flatten_dict(unfreeze(model.params))
        new_params = flatten_dict(unfreeze(new_model.params))

        for key in base_params.keys():
            max_diff = (base_params[key] - new_params[key]).sum().item()
            self.assertLessEqual(max_diff, 1e-3, msg=f"{key} not identical")

    def test_push_to_hub_in_organization(self):
        config = BertConfig(
            vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37
        )
        model = FlaxBertModel(config)
        model.push_to_hub("valid_org/test-model-flax-org", use_auth_token=self._token)

        new_model = FlaxBertModel.from_pretrained("valid_org/test-model-flax-org")

        base_params = flatten_dict(unfreeze(model.params))
        new_params = flatten_dict(unfreeze(new_model.params))

        for key in base_params.keys():
            max_diff = (base_params[key] - new_params[key]).sum().item()
            self.assertLessEqual(max_diff, 1e-3, msg=f"{key} not identical")

        # Reset repo
        delete_repo(token=self._token, repo_id="valid_org/test-model-flax-org")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(
                tmp_dir, repo_id="valid_org/test-model-flax-org", push_to_hub=True, use_auth_token=self._token
            )

        new_model = FlaxBertModel.from_pretrained("valid_org/test-model-flax-org")

        base_params = flatten_dict(unfreeze(model.params))
        new_params = flatten_dict(unfreeze(new_model.params))

        for key in base_params.keys():
            max_diff = (base_params[key] - new_params[key]).sum().item()
            self.assertLessEqual(max_diff, 1e-3, msg=f"{key} not identical")


def check_models_equal(model1, model2):
    models_are_equal = True
    flat_params_1 = flatten_dict(model1.params)
    flat_params_2 = flatten_dict(model2.params)
    for key in flat_params_1.keys():
        if np.sum(np.abs(flat_params_1[key] - flat_params_2[key])) > 1e-4:
            models_are_equal = False
    return models_are_equal


@require_flax
class FlaxModelUtilsTest(unittest.TestCase):
    def test_model_from_pretrained_subfolder(self):
        config = BertConfig.from_pretrained("hf-internal-testing/tiny-bert-flax-only")
        model = FlaxBertModel(config)

        subfolder = "bert"
        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(os.path.join(tmp_dir, subfolder))

            with self.assertRaises(OSError):
                _ = FlaxBertModel.from_pretrained(tmp_dir)

            model_loaded = FlaxBertModel.from_pretrained(tmp_dir, subfolder=subfolder)

        self.assertTrue(check_models_equal(model, model_loaded))

    def test_model_from_pretrained_subfolder_sharded(self):
        config = BertConfig.from_pretrained("hf-internal-testing/tiny-bert-flax-only")
        model = FlaxBertModel(config)

        subfolder = "bert"
        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(os.path.join(tmp_dir, subfolder), max_shard_size="10KB")

            with self.assertRaises(OSError):
                _ = FlaxBertModel.from_pretrained(tmp_dir)

            model_loaded = FlaxBertModel.from_pretrained(tmp_dir, subfolder=subfolder)

        self.assertTrue(check_models_equal(model, model_loaded))

    def test_model_from_pretrained_hub_subfolder(self):
        subfolder = "bert"
        model_id = "hf-internal-testing/tiny-random-bert-subfolder"
        with self.assertRaises(OSError):
            _ = FlaxBertModel.from_pretrained(model_id)

        model = FlaxBertModel.from_pretrained(model_id, subfolder=subfolder)

        self.assertIsNotNone(model)

    def test_model_from_pretrained_hub_subfolder_sharded(self):
        subfolder = "bert"
        model_id = "hf-internal-testing/tiny-random-bert-sharded-subfolder"
        with self.assertRaises(OSError):
            _ = FlaxBertModel.from_pretrained(model_id)

        model = FlaxBertModel.from_pretrained(model_id, subfolder=subfolder)

        self.assertIsNotNone(model)
| 337
|
import os

import pytest
from attr import dataclass


os.environ["AWS_DEFAULT_REGION"] = "us-east-1"  # defaults region


@dataclass
class SageMakerTestEnvironment:
    framework: str
    role = "arn:aws:iam::558105141721:role/sagemaker_execution_role"
    hyperparameters = {
        "task_name": "mnli",
        "per_device_train_batch_size": 16,
        "per_device_eval_batch_size": 16,
        "do_train": True,
        "do_eval": True,
        "do_predict": True,
        "output_dir": "/opt/ml/model",
        "overwrite_output_dir": True,
        "max_steps": 500,
        "save_steps": 5500,
    }
    distributed_hyperparameters = {**hyperparameters, "max_steps": 1000}

    @property
    def metric_definitions(self) -> list:
        if self.framework == "pytorch":
            return [
                {"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"},
                {"Name": "eval_accuracy", "Regex": r"eval_accuracy.*=\D*(.*?)$"},
                {"Name": "eval_loss", "Regex": r"eval_loss.*=\D*(.*?)$"},
            ]
        else:
            return [
                {"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"},
                {"Name": "eval_accuracy", "Regex": r"loss.*=\D*(.*?)]?$"},
                {"Name": "eval_loss", "Regex": r"sparse_categorical_accuracy.*=\D*(.*?)]?$"},
            ]

    @property
    def base_job_name(self) -> str:
        return f"{self.framework}-transformers-test"

    @property
    def test_path(self) -> str:
        return f"./tests/sagemaker/scripts/{self.framework}"

    @property
    def image_uri(self) -> str:
        if self.framework == "pytorch":
            return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-pytorch-training:1.7.1-transformers4.6.1-gpu-py36-cu110-ubuntu18.04"
        else:
            return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-tensorflow-training:2.4.1-transformers4.6.1-gpu-py37-cu110-ubuntu18.04"


@pytest.fixture(scope="class")
def sm_env(request):
    request.cls.env = SageMakerTestEnvironment(framework=request.cls.framework)
| 337
| 1
|
from __future__ import annotations

from sys import maxsize
from typing import Generic, TypeVar

T = TypeVar("T")


def get_parent_position(position: int) -> int:
    return (position - 1) // 2


def get_child_left_position(position: int) -> int:
    return (2 * position) + 1


def get_child_right_position(position: int) -> int:
    return (2 * position) + 2


class MinPriorityQueue(Generic[T]):
    """Binary min-heap keyed by weight, with a position map for O(log n) key updates."""

    def __init__(self) -> None:
        self.heap: list[tuple[T, int]] = []
        self.position_map: dict[T, int] = {}
        self.elements: int = 0

    def __len__(self) -> int:
        return self.elements

    def __repr__(self) -> str:
        return str(self.heap)

    def is_empty(self) -> bool:
        return self.elements == 0

    def push(self, elem: T, weight: int) -> None:
        self.heap.append((elem, weight))
        self.position_map[elem] = self.elements
        self.elements += 1
        self._bubble_up(elem)

    def extract_min(self) -> T:
        # Remove and return the element with the lowest weight (highest priority)
        if self.elements > 1:
            self._swap_nodes(0, self.elements - 1)
        elem, _ = self.heap.pop()
        del self.position_map[elem]
        self.elements -= 1
        if self.elements > 0:
            bubble_down_elem, _ = self.heap[0]
            self._bubble_down(bubble_down_elem)
        return elem

    def update_key(self, elem: T, weight: int) -> None:
        position = self.position_map[elem]
        self.heap[position] = (elem, weight)
        if position > 0:
            parent_position = get_parent_position(position)
            _, parent_weight = self.heap[parent_position]
            if parent_weight > weight:
                self._bubble_up(elem)
            else:
                self._bubble_down(elem)
        else:
            self._bubble_down(elem)

    def _bubble_up(self, elem: T) -> None:
        # Move a node up toward the root until the heap property holds
        curr_pos = self.position_map[elem]
        if curr_pos == 0:
            return None
        parent_position = get_parent_position(curr_pos)
        _, weight = self.heap[curr_pos]
        _, parent_weight = self.heap[parent_position]
        if parent_weight > weight:
            self._swap_nodes(curr_pos, parent_position)
            return self._bubble_up(elem)
        return None

    def _bubble_down(self, elem: T) -> None:
        # Move a node down toward the leaves until the heap property holds
        curr_pos = self.position_map[elem]
        _, weight = self.heap[curr_pos]
        child_left_position = get_child_left_position(curr_pos)
        child_right_position = get_child_right_position(curr_pos)
        if child_left_position < self.elements and child_right_position < self.elements:
            _, child_left_weight = self.heap[child_left_position]
            _, child_right_weight = self.heap[child_right_position]
            if child_right_weight < child_left_weight and child_right_weight < weight:
                self._swap_nodes(child_right_position, curr_pos)
                return self._bubble_down(elem)
        if child_left_position < self.elements:
            _, child_left_weight = self.heap[child_left_position]
            if child_left_weight < weight:
                self._swap_nodes(child_left_position, curr_pos)
                return self._bubble_down(elem)
        else:
            return None
        if child_right_position < self.elements:
            _, child_right_weight = self.heap[child_right_position]
            if child_right_weight < weight:
                self._swap_nodes(child_right_position, curr_pos)
                return self._bubble_down(elem)
        return None

    def _swap_nodes(self, node1_pos: int, node2_pos: int) -> None:
        node1_elem = self.heap[node1_pos][0]
        node2_elem = self.heap[node2_pos][0]
        self.heap[node1_pos], self.heap[node2_pos] = (
            self.heap[node2_pos],
            self.heap[node1_pos],
        )
        self.position_map[node1_elem] = node2_pos
        self.position_map[node2_elem] = node1_pos


class GraphUndirectedWeighted(Generic[T]):
    def __init__(self) -> None:
        self.connections: dict[T, dict[T, int]] = {}
        self.nodes: int = 0

    def __repr__(self) -> str:
        return str(self.connections)

    def __len__(self) -> int:
        return self.nodes

    def add_node(self, node: T) -> None:
        if node not in self.connections:
            self.connections[node] = {}
            self.nodes += 1

    def add_edge(self, node1: T, node2: T, weight: int) -> None:
        self.add_node(node1)
        self.add_node(node2)
        self.connections[node1][node2] = weight
        self.connections[node2][node1] = weight


def prims_algo(graph: GraphUndirectedWeighted[T]) -> tuple[dict[T, int], dict[T, T | None]]:
    dist: dict[T, int] = {node: maxsize for node in graph.connections}
    parent: dict[T, T | None] = {node: None for node in graph.connections}

    priority_queue: MinPriorityQueue[T] = MinPriorityQueue()
    for node, weight in dist.items():
        priority_queue.push(node, weight)

    if priority_queue.is_empty():
        return dist, parent

    # initialization
    node = priority_queue.extract_min()
    dist[node] = 0
    for neighbour in graph.connections[node]:
        if dist[neighbour] > dist[node] + graph.connections[node][neighbour]:
            dist[neighbour] = dist[node] + graph.connections[node][neighbour]
            priority_queue.update_key(neighbour, dist[neighbour])
            parent[neighbour] = node

    # running prim's algorithm
    while not priority_queue.is_empty():
        node = priority_queue.extract_min()
        for neighbour in graph.connections[node]:
            if dist[neighbour] > dist[node] + graph.connections[node][neighbour]:
                dist[neighbour] = dist[node] + graph.connections[node][neighbour]
                priority_queue.update_key(neighbour, dist[neighbour])
                parent[neighbour] = node
    return dist, parent
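A small usage sketch (illustrative):

graph = GraphUndirectedWeighted[int]()
graph.add_edge(1, 2, 3)
graph.add_edge(2, 3, 2)
graph.add_edge(1, 3, 10)
dist, parent = prims_algo(graph)
print(parent)  # {1: None, 2: 1, 3: 2}: the tree uses edges 1-2 and 2-3, skipping the weight-10 edge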
| 332
|
import json
import os
import unittest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class a ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
"""simple docstring"""
UpperCamelCase_ : Union[str, Any] = MgpstrTokenizer
UpperCamelCase_ : Optional[Any] = False
UpperCamelCase_ : Any = {}
UpperCamelCase_ : List[str] = False
def UpperCAmelCase_ ( self : List[Any] ) -> int:
"""simple docstring"""
super().setUp()
# fmt: off
__lowercase = ['''[GO]''', '''[s]''', '''0''', '''1''', '''2''', '''3''', '''4''', '''5''', '''6''', '''7''', '''8''', '''9''', '''a''', '''b''', '''c''', '''d''', '''e''', '''f''', '''g''', '''h''', '''i''', '''j''', '''k''', '''l''', '''m''', '''n''', '''o''', '''p''', '''q''', '''r''', '''s''', '''t''', '''u''', '''v''', '''w''', '''x''', '''y''', '''z''']
# fmt: on
__lowercase = dict(zip(lowerCamelCase__ , range(len(lowerCamelCase__ ) ) ) )
__lowercase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(lowerCamelCase__ ) + '''\n''' )
def UpperCAmelCase_ ( self : List[str] , **lowerCamelCase__ : Optional[Any] ) -> List[str]:
"""simple docstring"""
return MgpstrTokenizer.from_pretrained(self.tmpdirname , **lowerCamelCase__ )
def UpperCAmelCase_ ( self : Tuple , lowerCamelCase__ : Dict ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = '''tester'''
__lowercase = '''tester'''
return input_text, output_text
@unittest.skip('''MGP-STR always lower cases letters.''' )
def UpperCAmelCase_ ( self : Optional[Any] ) -> Any:
"""simple docstring"""
pass
def UpperCAmelCase_ ( self : List[Any] ) -> Optional[Any]:
"""simple docstring"""
__lowercase = self.get_tokenizers(do_lower_case=lowerCamelCase__ )
for tokenizer in tokenizers:
with self.subTest(f'{tokenizer.__class__.__name__}' ):
__lowercase = '''[SPECIAL_TOKEN]'''
tokenizer.add_special_tokens({'''cls_token''': special_token} )
                __lowercase = tokenizer.encode([special_token] , add_special_tokens=False )
                self.assertEqual(len(__lowercase ) , 1 )
                __lowercase = tokenizer.decode(__lowercase , skip_special_tokens=True )
                self.assertTrue(special_token not in __lowercase )
def UpperCAmelCase_ ( self : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f'{tokenizer.__class__.__name__}' ):
                input_text , output_text = self.get_input_output_texts(tokenizer )
                tokens = tokenizer.tokenize(input_text )
                ids = tokenizer.convert_tokens_to_ids(tokens )
                ids_a = tokenizer.encode(input_text , add_special_tokens=False )
                self.assertListEqual(ids , ids_a )
                tokens_a = tokenizer.convert_ids_to_tokens(ids )
                self.assertNotEqual(len(tokens_a ) , 0 )
                text_a = tokenizer.decode(ids )
                self.assertIsInstance(text_a , str )
                self.assertEqual(text_a.replace(''' ''' , '''''' ) , output_text )
@unittest.skip('''MGP-STR tokenizer only handles one sequence.''' )
def UpperCAmelCase_ ( self : Union[str, Any] ) -> str:
"""simple docstring"""
pass
@unittest.skip('''inputs cannot be pretokenized in MgpstrTokenizer''' )
def UpperCAmelCase_ ( self : Tuple ) -> str:
"""simple docstring"""
pass
| 332
| 1
|
'''simple docstring'''
import argparse
import json
import os
from collections import OrderedDict
import torch
from transformers import LukeConfig, LukeForMaskedLM, MLukeTokenizer, XLMRobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
@torch.no_grad()
def convert_luke_checkpoint( checkpoint_path , metadata_path , entity_vocab_path , pytorch_dump_folder_path , model_size ) -> Union[str, Any]:
"""simple docstring"""
    with open(metadata_path ) as metadata_file:
        metadata =json.load(metadata_file )
    config =LukeConfig(use_entity_aware_attention=True , **metadata['model_config'] )
# Load in the weights from the checkpoint_path
    state_dict =torch.load(checkpoint_path , map_location='cpu' )['module']
# Load the entity vocab file
    entity_vocab =load_original_entity_vocab(entity_vocab_path )
    # add an entry for [MASK2]
    entity_vocab['[MASK2]'] =max(entity_vocab.values() ) + 1
config.entity_vocab_size += 1
    tokenizer =XLMRobertaTokenizer.from_pretrained(metadata['model_config']['bert_model_name'] )
# Add special tokens to the token vocabulary for downstream tasks
    entity_token_a =AddedToken('<ent>' , lstrip=False , rstrip=False )
    entity_token_b =AddedToken('<ent2>' , lstrip=False , rstrip=False )
    tokenizer.add_special_tokens({'additional_special_tokens': [entity_token_a, entity_token_b]} )
config.vocab_size += 2
print(f'''Saving tokenizer to {pytorch_dump_folder_path}''' )
    tokenizer.save_pretrained(pytorch_dump_folder_path )
with open(os.path.join(__UpperCAmelCase , 'tokenizer_config.json' ) , 'r' ) as f:
snake_case: List[Any] =json.load(__UpperCAmelCase )
snake_case: Any ='MLukeTokenizer'
with open(os.path.join(__UpperCAmelCase , 'tokenizer_config.json' ) , 'w' ) as f:
json.dump(__UpperCAmelCase , __UpperCAmelCase )
with open(os.path.join(__UpperCAmelCase , MLukeTokenizer.vocab_files_names['entity_vocab_file'] ) , 'w' ) as f:
json.dump(__UpperCAmelCase , __UpperCAmelCase )
    tokenizer =MLukeTokenizer.from_pretrained(pytorch_dump_folder_path )
# Initialize the embeddings of the special tokens
snake_case: str =tokenizer.convert_tokens_to_ids(['@'] )[0]
snake_case: List[str] =tokenizer.convert_tokens_to_ids(['#'] )[0]
snake_case: int =state_dict['embeddings.word_embeddings.weight']
snake_case: List[str] =word_emb[ent_init_index].unsqueeze(0 )
snake_case: Any =word_emb[enta_init_index].unsqueeze(0 )
snake_case: Optional[int] =torch.cat([word_emb, ent_emb, enta_emb] )
# add special tokens for 'entity_predictions.bias'
for bias_name in ["lm_head.decoder.bias", "lm_head.bias"]:
snake_case: Optional[int] =state_dict[bias_name]
snake_case: Optional[Any] =decoder_bias[ent_init_index].unsqueeze(0 )
snake_case: Any =decoder_bias[enta_init_index].unsqueeze(0 )
snake_case: str =torch.cat([decoder_bias, ent_decoder_bias, enta_decoder_bias] )
# Initialize the query layers of the entity-aware self-attention mechanism
for layer_index in range(config.num_hidden_layers ):
for matrix_name in ["query.weight", "query.bias"]:
snake_case: Union[str, Any] =f'''encoder.layer.{layer_index}.attention.self.'''
snake_case: Union[str, Any] =state_dict[prefix + matrix_name]
snake_case: Union[str, Any] =state_dict[prefix + matrix_name]
snake_case: List[Any] =state_dict[prefix + matrix_name]
# Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
snake_case: Optional[Any] =state_dict['entity_embeddings.entity_embeddings.weight']
snake_case: Dict =entity_emb[entity_vocab['[MASK]']].unsqueeze(0 )
snake_case: Optional[int] =torch.cat([entity_emb, entity_mask_emb] )
# add [MASK2] for 'entity_predictions.bias'
snake_case: Optional[int] =state_dict['entity_predictions.bias']
snake_case: Union[str, Any] =entity_prediction_bias[entity_vocab['[MASK]']].unsqueeze(0 )
snake_case: List[Any] =torch.cat([entity_prediction_bias, entity_mask_bias] )
    model =LukeForMaskedLM(config=config ).eval()
state_dict.pop('entity_predictions.decoder.weight' )
state_dict.pop('lm_head.decoder.weight' )
state_dict.pop('lm_head.decoder.bias' )
    new_state_dict =OrderedDict()
    for key, value in state_dict.items():
        if not (key.startswith('lm_head' ) or key.startswith('entity_predictions' )):
            new_state_dict['luke.' + key] =state_dict[key]  # body weights move under the 'luke.' prefix
        else:
            new_state_dict[key] =state_dict[key]  # head weights keep their original keys
    missing_keys, unexpected_keys =model.load_state_dict(new_state_dict , strict=False )
    if set(unexpected_keys ) != {"luke.embeddings.position_ids"}:
        raise ValueError(f'''Unexpected unexpected_keys: {unexpected_keys}''' )
    if set(missing_keys ) != {
"lm_head.decoder.weight",
"lm_head.decoder.bias",
"entity_predictions.decoder.weight",
}:
raise ValueError(f'''Unexpected missing_keys: {missing_keys}''' )
model.tie_weights()
assert (model.luke.embeddings.word_embeddings.weight == model.lm_head.decoder.weight).all()
assert (model.luke.entity_embeddings.entity_embeddings.weight == model.entity_predictions.decoder.weight).all()
# Check outputs
    tokenizer =MLukeTokenizer.from_pretrained(pytorch_dump_folder_path , task='entity_classification' )
    text ='ISO 639-3 uses the code fas for the dialects spoken across Iran and アフガニスタン (Afghanistan).'
    span =(0, 9)
    encoding =tokenizer(text , entity_spans=[span] , return_tensors='pt' )
    outputs =model(**encoding )
# Verify word hidden states
if model_size == "large":
raise NotImplementedError
else: # base
        expected_shape =torch.Size((1, 33, 7_68) )
        expected_slice =torch.tensor([[0.0892, 0.0596, -0.2819], [0.0134, 0.1199, 0.0573], [-0.0169, 0.0927, 0.0644]] )
if not (outputs.last_hidden_state.shape == expected_shape):
raise ValueError(
f'''Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}''' )
    if not torch.allclose(outputs.last_hidden_state[0, :3, :3] , expected_slice , atol=1e-4 ):
raise ValueError
# Verify entity hidden states
if model_size == "large":
raise NotImplementedError
else: # base
        expected_shape =torch.Size((1, 1, 7_68) )
        expected_slice =torch.tensor([[-0.1482, 0.0609, 0.0322]] )
if not (outputs.entity_last_hidden_state.shape == expected_shape):
raise ValueError(
f'''Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is'''
f''' {expected_shape}''' )
    if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3] , expected_slice , atol=1e-4 ):
raise ValueError
# Verify masked word/entity prediction
    tokenizer =MLukeTokenizer.from_pretrained(pytorch_dump_folder_path )
    text ='Tokyo is the capital of <mask>.'
    span =(24, 30)
    encoding =tokenizer(text , entity_spans=[span] , return_tensors='pt' )
    outputs =model(**encoding )
    input_ids =encoding['input_ids'][0].tolist()
    mask_position_id =input_ids.index(tokenizer.convert_tokens_to_ids('<mask>' ) )
    predicted_id =outputs.logits[0][mask_position_id].argmax(dim=-1 )
    assert "Japan" == tokenizer.decode(predicted_id )
    predicted_entity_id =outputs.entity_logits[0][0].argmax().item()
    multilingual_predicted_entities =[
        entity for entity, entity_id in tokenizer.entity_vocab.items() if entity_id == predicted_entity_id
    ]
assert [e for e in multilingual_predicted_entities if e.startswith('en:' )][0] == "en:Japan"
# Finally, save our PyTorch model and tokenizer
    print('Saving PyTorch model to {}'.format(pytorch_dump_folder_path ) )
    model.save_pretrained(pytorch_dump_folder_path )
def a_ ( __UpperCAmelCase ) -> Optional[int]:
"""simple docstring"""
    SPECIAL_TOKENS =['[MASK]', '[PAD]', '[UNK]']
    data =[json.loads(line ) for line in open(__UpperCAmelCase )]
    new_mapping ={}
    for entry in data:
        entity_id =entry['id']
        for entity_name, language in entry["entities"]:
            if entity_name in SPECIAL_TOKENS:
                new_mapping[entity_name] =entity_id
                break
            new_mapping[f'''{language}:{entity_name}'''] =entity_id
    return new_mapping
if __name__ == "__main__":
a = argparse.ArgumentParser()
# Required parameters
parser.add_argument('--checkpoint_path', type=str, help='Path to a pytorch_model.bin file.')
parser.add_argument(
'--metadata_path', default=None, type=str, help='Path to a metadata.json file, defining the configuration.'
)
parser.add_argument(
'--entity_vocab_path',
default=None,
type=str,
help='Path to an entity_vocab.tsv file, containing the entity vocabulary.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to where to dump the output PyTorch model.'
)
parser.add_argument(
'--model_size', default='base', type=str, choices=['base', 'large'], help='Size of the model to be converted.'
)
a = parser.parse_args()
convert_luke_checkpoint(
args.checkpoint_path,
args.metadata_path,
args.entity_vocab_path,
args.pytorch_dump_folder_path,
args.model_size,
)
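# Example invocation (every path below is a placeholder, and the script file
# name is an assumption; adapt both to your checkout):
#
#   python convert_mluke_checkpoint.py \
#       --checkpoint_path ./mluke/pytorch_model.bin \
#       --metadata_path ./mluke/metadata.json \
#       --entity_vocab_path ./mluke/entity_vocab.jsonl \
#       --pytorch_dump_folder_path ./mluke-base-converted \
#       --model_size base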
| 703
|
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
import diffusers
from diffusers import (
AutoencoderKL,
EulerDiscreteScheduler,
StableDiffusionLatentUpscalePipeline,
StableDiffusionPipeline,
UNetaDConditionModel,
)
from diffusers.schedulers import KarrasDiffusionSchedulers
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
def check_same_shape( tensor_list ) -> bool:
    """simple docstring"""
    shapes =[tensor.shape for tensor in tensor_list]
    return all(shape == shapes[0] for shape in shapes[1:] )
class a_ ( snake_case , snake_case , snake_case , unittest.TestCase ):
UpperCAmelCase : List[str] = StableDiffusionLatentUpscalePipeline
UpperCAmelCase : List[Any] = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {
"""height""",
"""width""",
"""cross_attention_kwargs""",
"""negative_prompt_embeds""",
"""prompt_embeds""",
}
UpperCAmelCase : List[Any] = PipelineTesterMixin.required_optional_params - {"""num_images_per_prompt"""}
UpperCAmelCase : Optional[Any] = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
UpperCAmelCase : Optional[int] = frozenset(
[] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
UpperCAmelCase : str = frozenset([] )
UpperCAmelCase : Any = True
@property
def UpperCamelCase ( self : Dict ) -> List[str]:
        batch_size =1
        num_channels =4
        sizes =(1_6, 1_6)
        image =floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(torch_device )
        return image
def UpperCamelCase ( self : Any ) -> Tuple:
torch.manual_seed(0 )
snake_case: Dict =UNetaDConditionModel(
act_fn='gelu' , attention_head_dim=8 , norm_num_groups=a_ , block_out_channels=[3_2, 3_2, 6_4, 6_4] , time_cond_proj_dim=1_6_0 , conv_in_kernel=1 , conv_out_kernel=1 , cross_attention_dim=3_2 , down_block_types=(
'KDownBlock2D',
'KCrossAttnDownBlock2D',
'KCrossAttnDownBlock2D',
'KCrossAttnDownBlock2D',
) , in_channels=8 , mid_block_type=a_ , only_cross_attention=a_ , out_channels=5 , resnet_time_scale_shift='scale_shift' , time_embedding_type='fourier' , timestep_post_act='gelu' , up_block_types=('KCrossAttnUpBlock2D', 'KCrossAttnUpBlock2D', 'KCrossAttnUpBlock2D', 'KUpBlock2D') , )
snake_case: int =AutoencoderKL(
block_out_channels=[3_2, 3_2, 6_4, 6_4] , in_channels=3 , out_channels=3 , down_block_types=[
'DownEncoderBlock2D',
'DownEncoderBlock2D',
'DownEncoderBlock2D',
'DownEncoderBlock2D',
] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D', 'UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , )
snake_case: Optional[int] =EulerDiscreteScheduler(prediction_type='sample' )
snake_case: Optional[int] =CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=3_2 , intermediate_size=3_7 , layer_norm_eps=1E-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , hidden_act='quick_gelu' , projection_dim=5_1_2 , )
snake_case: Dict =CLIPTextModel(a_ )
snake_case: List[str] =CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
snake_case: Tuple ={
'unet': model.eval(),
'vae': vae.eval(),
'scheduler': scheduler,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
}
return components
    def UpperCamelCase ( self : int , device : Optional[int] , seed : str=0 ) -> List[Any]:
        if str(device ).startswith('mps' ):
            generator =torch.manual_seed(seed )
        else:
            generator =torch.Generator(device=device ).manual_seed(seed )
        inputs ={
'prompt': 'A painting of a squirrel eating a burger',
'image': self.dummy_image.cpu(),
'generator': generator,
'num_inference_steps': 2,
'output_type': 'numpy',
}
return inputs
def UpperCamelCase ( self : Union[str, Any] ) -> Any:
        device ='cpu'
        components =self.get_dummy_components()
        pipe =self.pipeline_class(**components )
        pipe.to(device )
        pipe.set_progress_bar_config(disable=None )
        inputs =self.get_dummy_inputs(device )
        image =pipe(**inputs ).images
        image_slice =image[0, -3:, -3:, -1]
        self.assertEqual(image.shape , (1, 2_5_6, 2_5_6, 3) )
        expected_slice =np.array(
            [0.4_7_2_2_2_4_1_2, 0.4_1_9_2_1_6_3_3, 0.4_4_7_1_7_4_3_4, 0.4_6_8_7_4_1_9_2, 0.4_2_5_8_8_2_5_8, 0.4_6_1_5_0_7_2_6, 0.4_6_7_7_5_3_4, 0.4_5_5_8_3_8_3_2, 0.4_8_5_7_9_0_5_5] )
        max_diff =np.abs(image_slice.flatten() - expected_slice ).max()
        self.assertLessEqual(max_diff , 1E-3 )
def UpperCamelCase ( self : Tuple ) -> List[str]:
super().test_attention_slicing_forward_pass(expected_max_diff=7E-3 )
def UpperCamelCase ( self : str ) -> Optional[Any]:
super().test_cpu_offload_forward_pass(expected_max_diff=3E-3 )
def UpperCamelCase ( self : Tuple ) -> Optional[Any]:
super().test_dict_tuple_outputs_equivalent(expected_max_difference=3E-3 )
def UpperCamelCase ( self : Union[str, Any] ) -> int:
super().test_inference_batch_single_identical(expected_max_diff=7E-3 )
def UpperCamelCase ( self : List[str] ) -> Optional[int]:
super().test_pt_np_pil_outputs_equivalent(expected_max_diff=3E-3 )
def UpperCamelCase ( self : Optional[int] ) -> Any:
super().test_save_load_local(expected_max_difference=3E-3 )
def UpperCamelCase ( self : Dict ) -> Optional[Any]:
super().test_save_load_optional_components(expected_max_difference=3E-3 )
def UpperCamelCase ( self : List[str] ) -> Union[str, Any]:
        skip_schedulers =[
'DDIMScheduler',
'DDPMScheduler',
'PNDMScheduler',
'HeunDiscreteScheduler',
'EulerAncestralDiscreteScheduler',
'KDPM2DiscreteScheduler',
'KDPM2AncestralDiscreteScheduler',
'DPMSolverSDEScheduler',
]
        components =self.get_dummy_components()
        pipe =self.pipeline_class(**components )
        # make sure that PNDM does not need warm-up
        pipe.scheduler.register_to_config(skip_prk_steps=True )
        pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        inputs =self.get_dummy_inputs(torch_device )
        inputs['num_inference_steps'] =2
        outputs =[]
        for scheduler_enum in KarrasDiffusionSchedulers:
            if scheduler_enum.name in skip_schedulers:
                # schedulers without sigmas are not supported here, so skip them
                continue
            scheduler_cls =getattr(diffusers , scheduler_enum.name )
            pipe.scheduler =scheduler_cls.from_config(pipe.scheduler.config )
            output =pipe(**inputs )[0]
            outputs.append(output )
        assert check_same_shape(outputs )
@require_torch_gpu
@slow
class a_ ( unittest.TestCase ):
def UpperCamelCase ( self : List[Any] ) -> Optional[Any]:
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCamelCase ( self : Any ) -> str:
snake_case: List[str] =torch.manual_seed(3_3 )
snake_case: Optional[int] =StableDiffusionPipeline.from_pretrained('CompVis/stable-diffusion-v1-4' , torch_dtype=torch.floataa )
pipe.to('cuda' )
snake_case: Optional[Any] =StableDiffusionLatentUpscalePipeline.from_pretrained(
'stabilityai/sd-x2-latent-upscaler' , torch_dtype=torch.floataa )
upscaler.to('cuda' )
snake_case: str ='a photo of an astronaut high resolution, unreal engine, ultra realistic'
snake_case: Any =pipe(a_ , generator=a_ , output_type='latent' ).images
snake_case: Optional[int] =upscaler(
prompt=a_ , image=a_ , num_inference_steps=2_0 , guidance_scale=0 , generator=a_ , output_type='np' , ).images[0]
snake_case: str =load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/astronaut_1024.npy' )
assert np.abs((expected_image - image).mean() ) < 5E-2
def UpperCamelCase ( self : Dict ) -> str:
snake_case: Optional[Any] =torch.manual_seed(3_3 )
snake_case: Union[str, Any] =StableDiffusionLatentUpscalePipeline.from_pretrained(
'stabilityai/sd-x2-latent-upscaler' , torch_dtype=torch.floataa )
upscaler.to('cuda' )
snake_case: List[str] ='the temple of fire by Ross Tran and Gerardo Dottori, oil on canvas'
snake_case: Union[str, Any] =load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/fire_temple_512.png' )
snake_case: List[str] =upscaler(
prompt=a_ , image=a_ , num_inference_steps=2_0 , guidance_scale=0 , generator=a_ , output_type='np' , ).images[0]
snake_case: List[Any] =load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/fire_temple_1024.npy' )
assert np.abs((expected_image - image).max() ) < 5E-2
| 347
| 0
|
from typing import Optional, Union
import torch
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACTaFN
from ...modeling_outputs import BaseModelOutputWithPoolingAndNoAttention, ImageClassifierOutputWithNoAttention
from ...modeling_utils import PreTrainedModel
from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
from .configuration_mobilenet_va import MobileNetVaConfig
_lowercase = logging.get_logger(__name__)
# General docstring
_lowercase = '''MobileNetV1Config'''
# Base docstring
_lowercase = '''google/mobilenet_v1_1.0_224'''
_lowercase = [1, 10_24, 7, 7]
# Image classification docstring
_lowercase = '''google/mobilenet_v1_1.0_224'''
_lowercase = '''tabby, tabby cat'''
_lowercase = [
'''google/mobilenet_v1_1.0_224''',
'''google/mobilenet_v1_0.75_192''',
# See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1
]
def _build_tf_to_pytorch_map (model , config , tf_weights=None ) ->Dict:
    '''simple docstring'''
    tf_to_pt_map = {}
    if isinstance(model , MobileNetVaForImageClassification ):
        backbone = model.mobilenet_va
    else:
        backbone = model
    prefix = """MobilenetV1/Conv2d_0/"""
    tf_to_pt_map[prefix + """weights"""] = backbone.conv_stem.convolution.weight
    tf_to_pt_map[prefix + """BatchNorm/beta"""] = backbone.conv_stem.normalization.bias
    tf_to_pt_map[prefix + """BatchNorm/gamma"""] = backbone.conv_stem.normalization.weight
    tf_to_pt_map[prefix + """BatchNorm/moving_mean"""] = backbone.conv_stem.normalization.running_mean
    tf_to_pt_map[prefix + """BatchNorm/moving_variance"""] = backbone.conv_stem.normalization.running_var
    for i in range(13 ):
        tf_index = i + 1
        pt_index = i * 2
        pointer = backbone.layer[pt_index]
        prefix = f"MobilenetV1/Conv2d_{tf_index}_depthwise/"
        tf_to_pt_map[prefix + """depthwise_weights"""] = pointer.convolution.weight
        tf_to_pt_map[prefix + """BatchNorm/beta"""] = pointer.normalization.bias
        tf_to_pt_map[prefix + """BatchNorm/gamma"""] = pointer.normalization.weight
        tf_to_pt_map[prefix + """BatchNorm/moving_mean"""] = pointer.normalization.running_mean
        tf_to_pt_map[prefix + """BatchNorm/moving_variance"""] = pointer.normalization.running_var
        pointer = backbone.layer[pt_index + 1]
        prefix = f"MobilenetV1/Conv2d_{tf_index}_pointwise/"
        tf_to_pt_map[prefix + """weights"""] = pointer.convolution.weight
        tf_to_pt_map[prefix + """BatchNorm/beta"""] = pointer.normalization.bias
        tf_to_pt_map[prefix + """BatchNorm/gamma"""] = pointer.normalization.weight
        tf_to_pt_map[prefix + """BatchNorm/moving_mean"""] = pointer.normalization.running_mean
        tf_to_pt_map[prefix + """BatchNorm/moving_variance"""] = pointer.normalization.running_var
    if isinstance(model , MobileNetVaForImageClassification ):
        prefix = """MobilenetV1/Logits/Conv2d_1c_1x1/"""
        tf_to_pt_map[prefix + """weights"""] = model.classifier.weight
        tf_to_pt_map[prefix + """biases"""] = model.classifier.bias
    return tf_to_pt_map
def load_tf_weights_in_mobilenet_va (model , config , tf_checkpoint_path ) ->str:
'''simple docstring'''
try:
import numpy as np
import tensorflow as tf
except ImportError:
logger.error(
"""Loading a TensorFlow models in PyTorch, requires TensorFlow to be installed. Please see """
"""https://www.tensorflow.org/install/ for installation instructions.""" )
raise
# Load weights from TF model
    init_vars = tf.train.list_variables(tf_checkpoint_path )
    tf_weights = {}
    for name, shape in init_vars:
        logger.info(f"Loading TF weight {name} with shape {shape}" )
        array = tf.train.load_variable(tf_checkpoint_path , name )
        tf_weights[name] = array
    # Build TF to PyTorch weights loading map
    tf_to_pt_map = _build_tf_to_pytorch_map(model , config , tf_weights )
for name, pointer in tf_to_pt_map.items():
logger.info(f"Importing {name}" )
if name not in tf_weights:
logger.info(f"{name} not in tf pre-trained weights, skipping" )
continue
        array = tf_weights[name]
        if "depthwise_weights" in name:
            logger.info("""Transposing depthwise""" )
            array = np.transpose(array , (2, 3, 0, 1) )
        elif "weights" in name:
            logger.info("""Transposing""" )
            if len(pointer.shape ) == 2: # copying into linear layer
                array = array.squeeze().transpose()
            else:
                array = np.transpose(array , (3, 2, 0, 1) )
        if pointer.shape != array.shape:
            raise ValueError(f"Pointer shape {pointer.shape} and array shape {array.shape} mismatched" )
        logger.info(f"Initialize PyTorch weight {name} {array.shape}" )
        pointer.data = torch.from_numpy(array )
        tf_weights.pop(name , None )
        tf_weights.pop(name + """/RMSProp""" , None )
        tf_weights.pop(name + """/RMSProp_1""" , None )
        tf_weights.pop(name + """/ExponentialMovingAverage""" , None )
logger.info(f"Weights not copied to PyTorch model: {', '.join(tf_weights.keys() )}" )
return model
def apply_tf_padding (features , conv_layer ) ->torch.Tensor:
'''simple docstring'''
    in_height , in_width = features.shape[-2:]
    stride_height , stride_width = conv_layer.stride
    kernel_height , kernel_width = conv_layer.kernel_size
    if in_height % stride_height == 0:
        pad_along_height = max(kernel_height - stride_height , 0 )
    else:
        pad_along_height = max(kernel_height - (in_height % stride_height) , 0 )
    if in_width % stride_width == 0:
        pad_along_width = max(kernel_width - stride_width , 0 )
    else:
        pad_along_width = max(kernel_width - (in_width % stride_width) , 0 )
    pad_left = pad_along_width // 2
    pad_right = pad_along_width - pad_left
    pad_top = pad_along_height // 2
    pad_bottom = pad_along_height - pad_top
    padding = (pad_left, pad_right, pad_top, pad_bottom)
    return nn.functional.pad(features , padding , """constant""" , 0.0 )
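# Worked example of the "SAME" padding arithmetic above (numbers are
# illustrative, not taken from any checkpoint): with in_height = 224,
# stride_height = 2 and kernel_height = 3, in_height % stride_height == 0,
# so pad_along_height = max(3 - 2, 0) = 1, which gives pad_top = 0 and
# pad_bottom = 1. This asymmetric padding reproduces TensorFlow's "SAME"
# mode, which PyTorch's symmetric Conv2d padding argument cannot express.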
class MobileNetVaConvLayer ( nn.Module ):
def __init__(self , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ = 1 , __magic_name__ = 1 , __magic_name__ = False , __magic_name__ = True , __magic_name__ = True , ):
super().__init__()
lowerCamelCase__ : List[Any] = config
if in_channels % groups != 0:
raise ValueError(f"Input channels ({in_channels}) are not divisible by {groups} groups." )
if out_channels % groups != 0:
raise ValueError(f"Output channels ({out_channels}) are not divisible by {groups} groups." )
lowerCamelCase__ : Optional[Any] = 0 if config.tf_padding else int((kernel_size - 1) / 2 )
lowerCamelCase__ : List[Any] = nn.Convad(
in_channels=__lowerCamelCase , out_channels=__lowerCamelCase , kernel_size=__lowerCamelCase , stride=__lowerCamelCase , padding=__lowerCamelCase , groups=__lowerCamelCase , bias=__lowerCamelCase , padding_mode="""zeros""" , )
if use_normalization:
lowerCamelCase__ : Union[str, Any] = nn.BatchNormad(
num_features=__lowerCamelCase , eps=config.layer_norm_eps , momentum=0.99_97 , affine=__lowerCamelCase , track_running_stats=__lowerCamelCase , )
else:
lowerCamelCase__ : List[str] = None
if use_activation:
if isinstance(__lowerCamelCase , __lowerCamelCase ):
lowerCamelCase__ : str = ACTaFN[use_activation]
elif isinstance(config.hidden_act , __lowerCamelCase ):
lowerCamelCase__ : int = ACTaFN[config.hidden_act]
else:
lowerCamelCase__ : Any = config.hidden_act
else:
lowerCamelCase__ : int = None
def _snake_case (self , __magic_name__ ):
if self.config.tf_padding:
lowerCamelCase__ : str = apply_tf_padding(__lowerCamelCase , self.convolution )
lowerCamelCase__ : List[str] = self.convolution(__lowerCamelCase )
if self.normalization is not None:
lowerCamelCase__ : int = self.normalization(__lowerCamelCase )
if self.activation is not None:
lowerCamelCase__ : Union[str, Any] = self.activation(__lowerCamelCase )
return features
class MobileNetVaPreTrainedModel ( PreTrainedModel ):
UpperCamelCase :str = MobileNetVaConfig
UpperCamelCase :Dict = load_tf_weights_in_mobilenet_va
UpperCamelCase :List[str] = '''mobilenet_v1'''
UpperCamelCase :Optional[Any] = '''pixel_values'''
UpperCamelCase :List[Any] = False
def _snake_case (self , __magic_name__ ):
if isinstance(__lowerCamelCase , (nn.Linear, nn.Convad) ):
module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range )
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(__lowerCamelCase , nn.BatchNormad ):
module.bias.data.zero_()
module.weight.data.fill_(1.0 )
_lowercase = r'''
This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
behavior.
Parameters:
config ([`MobileNetV1Config`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
'''
_lowercase = r'''
Args:
pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
[`MobileNetV1ImageProcessor.__call__`] for details.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
'''
@add_start_docstrings(
'''The bare MobileNetV1 model outputting raw hidden-states without any specific head on top.''' , __SCREAMING_SNAKE_CASE , )
class MobileNetVaModel ( MobileNetVaPreTrainedModel ):
def __init__(self , __magic_name__ , __magic_name__ = True ):
super().__init__(__lowerCamelCase )
lowerCamelCase__ : List[Any] = config
lowerCamelCase__ : Tuple = 32
lowerCamelCase__ : Tuple = max(int(depth * config.depth_multiplier ) , config.min_depth )
lowerCamelCase__ : Tuple = MobileNetVaConvLayer(
__lowerCamelCase , in_channels=config.num_channels , out_channels=__lowerCamelCase , kernel_size=3 , stride=2 , )
lowerCamelCase__ : Union[str, Any] = [1, 2, 1, 2, 1, 2, 1, 1, 1, 1, 1, 2, 1]
lowerCamelCase__ : Union[str, Any] = nn.ModuleList()
for i in range(13 ):
lowerCamelCase__ : Any = out_channels
if strides[i] == 2 or i == 0:
depth *= 2
lowerCamelCase__ : str = max(int(depth * config.depth_multiplier ) , config.min_depth )
self.layer.append(
MobileNetVaConvLayer(
__lowerCamelCase , in_channels=__lowerCamelCase , out_channels=__lowerCamelCase , kernel_size=3 , stride=strides[i] , groups=__lowerCamelCase , ) )
self.layer.append(
MobileNetVaConvLayer(
__lowerCamelCase , in_channels=__lowerCamelCase , out_channels=__lowerCamelCase , kernel_size=1 , ) )
lowerCamelCase__ : Tuple = nn.AdaptiveAvgPoolad((1, 1) ) if add_pooling_layer else None
# Initialize weights and apply final processing
self.post_init()
def _snake_case (self , __magic_name__ ):
raise NotImplementedError
@add_start_docstrings_to_model_forward(__lowerCamelCase )
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC , output_type=__lowerCamelCase , config_class=_CONFIG_FOR_DOC , modality="""vision""" , expected_output=_EXPECTED_OUTPUT_SHAPE , )
def _snake_case (self , __magic_name__ = None , __magic_name__ = None , __magic_name__ = None , ):
lowerCamelCase__ : Any = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
lowerCamelCase__ : int = return_dict if return_dict is not None else self.config.use_return_dict
if pixel_values is None:
raise ValueError("""You have to specify pixel_values""" )
lowerCamelCase__ : List[Any] = self.conv_stem(__lowerCamelCase )
lowerCamelCase__ : List[str] = () if output_hidden_states else None
for i, layer_module in enumerate(self.layer ):
lowerCamelCase__ : List[Any] = layer_module(__lowerCamelCase )
if output_hidden_states:
lowerCamelCase__ : Union[str, Any] = all_hidden_states + (hidden_states,)
lowerCamelCase__ : Dict = hidden_states
if self.pooler is not None:
lowerCamelCase__ : Union[str, Any] = torch.flatten(self.pooler(__lowerCamelCase ) , start_dim=1 )
else:
lowerCamelCase__ : str = None
if not return_dict:
return tuple(v for v in [last_hidden_state, pooled_output, all_hidden_states] if v is not None )
return BaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=__lowerCamelCase , pooler_output=__lowerCamelCase , hidden_states=__lowerCamelCase , )
@add_start_docstrings(
'''
MobileNetV1 model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
ImageNet.
''' , __SCREAMING_SNAKE_CASE , )
class MobileNetVaForImageClassification ( MobileNetVaPreTrainedModel ):
def __init__(self , __magic_name__ ):
super().__init__(__lowerCamelCase )
lowerCamelCase__ : Any = config.num_labels
lowerCamelCase__ : Dict = MobileNetVaModel(__lowerCamelCase )
lowerCamelCase__ : Optional[int] = self.mobilenet_va.layer[-1].convolution.out_channels
# Classifier head
lowerCamelCase__ : Optional[Any] = nn.Dropout(config.classifier_dropout_prob , inplace=__lowerCamelCase )
lowerCamelCase__ : Dict = nn.Linear(__lowerCamelCase , config.num_labels ) if config.num_labels > 0 else nn.Identity()
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(__lowerCamelCase )
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=__lowerCamelCase , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
def _snake_case (self , __magic_name__ = None , __magic_name__ = None , __magic_name__ = None , __magic_name__ = None , ):
lowerCamelCase__ : List[Any] = return_dict if return_dict is not None else self.config.use_return_dict
lowerCamelCase__ : List[Any] = self.mobilenet_va(__lowerCamelCase , output_hidden_states=__lowerCamelCase , return_dict=__lowerCamelCase )
lowerCamelCase__ : Tuple = outputs.pooler_output if return_dict else outputs[1]
lowerCamelCase__ : str = self.classifier(self.dropout(__lowerCamelCase ) )
lowerCamelCase__ : Optional[int] = None
if labels is not None:
if self.config.problem_type is None:
if self.num_labels == 1:
lowerCamelCase__ : Any = """regression"""
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
lowerCamelCase__ : Union[str, Any] = """single_label_classification"""
else:
lowerCamelCase__ : Optional[Any] = """multi_label_classification"""
if self.config.problem_type == "regression":
lowerCamelCase__ : str = MSELoss()
if self.num_labels == 1:
lowerCamelCase__ : List[str] = loss_fct(logits.squeeze() , labels.squeeze() )
else:
lowerCamelCase__ : Union[str, Any] = loss_fct(__lowerCamelCase , __lowerCamelCase )
elif self.config.problem_type == "single_label_classification":
lowerCamelCase__ : Dict = CrossEntropyLoss()
lowerCamelCase__ : str = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
elif self.config.problem_type == "multi_label_classification":
lowerCamelCase__ : List[str] = BCEWithLogitsLoss()
lowerCamelCase__ : Optional[int] = loss_fct(__lowerCamelCase , __lowerCamelCase )
if not return_dict:
lowerCamelCase__ : List[str] = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return ImageClassifierOutputWithNoAttention(
loss=__lowerCamelCase , logits=__lowerCamelCase , hidden_states=outputs.hidden_states , )
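# Minimal usage sketch (the checkpoint name comes from the docstrings above;
# using AutoImageProcessor here is an assumption based on the usual
# transformers API, not something this file defines):
if __name__ == "__main__":
    import requests
    from PIL import Image
    from transformers import AutoImageProcessor

    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url, stream=True).raw)
    processor = AutoImageProcessor.from_pretrained("google/mobilenet_v1_1.0_224")
    model = MobileNetVaForImageClassification.from_pretrained("google/mobilenet_v1_1.0_224")
    inputs = processor(images=image, return_tensors="pt")
    logits = model(**inputs).logits
    print(model.config.id2label[logits.argmax(-1).item()])  # e.g. "tabby, tabby cat"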
| 157
|
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.whisper import WhisperForConditionalGeneration, WhisperProcessor
from .base import PipelineTool
class UpperCAmelCase ( __SCREAMING_SNAKE_CASE ):
A__ : str = '''openai/whisper-base'''
A__ : List[Any] = (
'''This is a tool that transcribes an audio into text. It takes an input named `audio` and returns the '''
'''transcribed text.'''
)
A__ : str = '''transcriber'''
A__ : List[Any] = WhisperProcessor
A__ : Optional[int] = WhisperForConditionalGeneration
A__ : List[str] = ['''audio''']
A__ : Optional[int] = ['''text''']
def __UpperCAmelCase ( self : Tuple , __lowerCamelCase : List[Any] ):
"""simple docstring"""
return self.pre_processor(__lowerCamelCase , return_tensors='''pt''' ).input_features
def __UpperCAmelCase ( self : Optional[int] , __lowerCamelCase : List[str] ):
"""simple docstring"""
return self.model.generate(inputs=__lowerCamelCase )
def __UpperCAmelCase ( self : List[str] , __lowerCamelCase : int ):
"""simple docstring"""
return self.pre_processor.batch_decode(__lowerCamelCase , skip_special_tokens=__lowerCamelCase )[0]
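# Minimal usage sketch (the class name above is the dataset's obfuscated one,
# and the audio array is a dummy one-second clip, not a real recording):
if __name__ == "__main__":
    import numpy as np

    transcriber = UpperCAmelCase()
    silence = np.zeros(16_000, dtype=np.float32)  # 1 s of silence at 16 kHz
    print(transcriber(silence))  # PipelineTool.__call__ chains encode -> forward -> decode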
| 103
| 0
|
def knapsack( weights , values , number_of_items , max_weight , index ) -> int:
    if index == number_of_items:
        return 0
    ans1 =0  # best value when the current item is skipped
    ans2 =0  # best value when the current item is taken
    ans1 =knapsack(weights , values , number_of_items , max_weight , index + 1 )
    if weights[index] <= max_weight:
        ans2 =values[index] + knapsack(
            weights , values , number_of_items , max_weight - weights[index] , index + 1 )
    return max(ans1 , ans2 )
if __name__ == "__main__":
import doctest
doctest.testmod()
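    # Minimal usage example of the recursive knapsack above (illustrative data):
    # items (weight, value): (1, 5), (2, 4), (4, 8), (5, 6); capacity 5.
    print(knapsack([1, 2, 4, 5], [5, 4, 8, 6], 4, 5, 0))  # -> 13 (take items 0 and 2)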
| 262
|
import json
import os
from pathlib import Path
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple, Union
import sentencepiece
from ...tokenization_utils import BatchEncoding, PreTrainedTokenizer
from ...utils import logging
snake_case_ = logging.get_logger(__name__)
snake_case_ = '''▁'''
snake_case_ = {
'''vocab_file''': '''vocab.json''',
'''spm_file''': '''sentencepiece.bpe.model''',
'''tokenizer_config_file''': '''tokenizer_config.json''',
}
snake_case_ = {
'''vocab_file''': {
'''facebook/m2m100_418M''': '''https://huggingface.co/facebook/m2m100_418M/resolve/main/vocab.json''',
'''facebook/m2m100_1.2B''': '''https://huggingface.co/facebook/m2m100_1.2B/resolve/main/vocab.json''',
},
'''spm_file''': {
'''facebook/m2m100_418M''': '''https://huggingface.co/facebook/m2m100_418M/resolve/main/sentencepiece.bpe.model''',
'''facebook/m2m100_1.2B''': '''https://huggingface.co/facebook/m2m100_1.2B/resolve/main/sentencepiece.bpe.model''',
},
'''tokenizer_config_file''': {
'''facebook/m2m100_418M''': '''https://huggingface.co/facebook/m2m100_418M/resolve/main/tokenizer_config.json''',
'''facebook/m2m100_1.2B''': '''https://huggingface.co/facebook/m2m100_1.2B/resolve/main/tokenizer_config.json''',
},
}
snake_case_ = {
'''facebook/m2m100_418M''': 1_0_2_4,
}
# fmt: off
snake_case_ = {
'''m2m100''': ['''af''', '''am''', '''ar''', '''ast''', '''az''', '''ba''', '''be''', '''bg''', '''bn''', '''br''', '''bs''', '''ca''', '''ceb''', '''cs''', '''cy''', '''da''', '''de''', '''el''', '''en''', '''es''', '''et''', '''fa''', '''ff''', '''fi''', '''fr''', '''fy''', '''ga''', '''gd''', '''gl''', '''gu''', '''ha''', '''he''', '''hi''', '''hr''', '''ht''', '''hu''', '''hy''', '''id''', '''ig''', '''ilo''', '''is''', '''it''', '''ja''', '''jv''', '''ka''', '''kk''', '''km''', '''kn''', '''ko''', '''lb''', '''lg''', '''ln''', '''lo''', '''lt''', '''lv''', '''mg''', '''mk''', '''ml''', '''mn''', '''mr''', '''ms''', '''my''', '''ne''', '''nl''', '''no''', '''ns''', '''oc''', '''or''', '''pa''', '''pl''', '''ps''', '''pt''', '''ro''', '''ru''', '''sd''', '''si''', '''sk''', '''sl''', '''so''', '''sq''', '''sr''', '''ss''', '''su''', '''sv''', '''sw''', '''ta''', '''th''', '''tl''', '''tn''', '''tr''', '''uk''', '''ur''', '''uz''', '''vi''', '''wo''', '''xh''', '''yi''', '''yo''', '''zh''', '''zu'''],
'''wmt21''': ['''en''', '''ha''', '''is''', '''ja''', '''cs''', '''ru''', '''zh''', '''de''']
}
class snake_case_ ( _A):
lowerCamelCase :Union[str, Any] = VOCAB_FILES_NAMES
lowerCamelCase :List[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCamelCase :str = PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase :Union[str, Any] = ["input_ids", "attention_mask"]
lowerCamelCase :List[int] = []
lowerCamelCase :List[int] = []
def __init__( self , __lowercase , __lowercase , __lowercase=None , __lowercase=None , __lowercase="<s>" , __lowercase="</s>" , __lowercase="</s>" , __lowercase="<pad>" , __lowercase="<unk>" , __lowercase="m2m100" , __lowercase = None , __lowercase=8 , **__lowercase , ) -> None:
lowerCamelCase : Union[str, Any] ={} if sp_model_kwargs is None else sp_model_kwargs
lowerCamelCase : List[str] =language_codes
lowerCamelCase : int =FAIRSEQ_LANGUAGE_CODES[language_codes]
lowerCamelCase : str ={lang_code: F"__{lang_code}__" for lang_code in fairseq_language_code}
lowerCamelCase : List[Any] =kwargs.get('''additional_special_tokens''' , [] )
kwargs["additional_special_tokens"] += [
self.get_lang_token(__lowercase )
for lang_code in fairseq_language_code
if self.get_lang_token(__lowercase ) not in kwargs["additional_special_tokens"]
]
super().__init__(
src_lang=__lowercase , tgt_lang=__lowercase , bos_token=__lowercase , eos_token=__lowercase , sep_token=__lowercase , unk_token=__lowercase , pad_token=__lowercase , language_codes=__lowercase , sp_model_kwargs=self.sp_model_kwargs , num_madeup_words=__lowercase , **__lowercase , )
lowerCamelCase : Dict =vocab_file
lowerCamelCase : List[Any] =load_json(__lowercase )
lowerCamelCase : Optional[int] ={v: k for k, v in self.encoder.items()}
lowerCamelCase : List[Any] =spm_file
lowerCamelCase : str =load_spm(__lowercase , self.sp_model_kwargs )
lowerCamelCase : Tuple =len(self.encoder )
lowerCamelCase : Optional[int] ={
self.get_lang_token(__lowercase ): self.encoder_size + i for i, lang_code in enumerate(__lowercase )
}
lowerCamelCase : Tuple ={lang_code: self.encoder_size + i for i, lang_code in enumerate(__lowercase )}
lowerCamelCase : Tuple ={v: k for k, v in self.lang_token_to_id.items()}
lowerCamelCase : Optional[Any] =src_lang if src_lang is not None else '''en'''
lowerCamelCase : Any =tgt_lang
lowerCamelCase : List[Any] =self.get_lang_id(self._src_lang )
self.set_src_lang_special_tokens(self._src_lang )
lowerCamelCase : Optional[Any] =num_madeup_words
@property
def __lowercase ( self ) -> int:
return len(self.encoder ) + len(self.lang_token_to_id )
@property
def __lowercase ( self ) -> str:
return self._src_lang
@src_lang.setter
def __lowercase ( self , __lowercase ) -> None:
lowerCamelCase : Any =new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
    def __lowercase ( self , text ) -> List[str]:
        return self.sp_model.encode(text , out_type=str )
    def __lowercase ( self , token ) -> Optional[Any]:
        if token in self.lang_token_to_id:
            return self.lang_token_to_id[token]
        return self.encoder.get(token , self.encoder[self.unk_token] )
    def __lowercase ( self , index ) -> str:
        if index in self.id_to_lang_token:
            return self.id_to_lang_token[index]
        return self.decoder.get(index , self.unk_token )
    def __lowercase ( self , tokens ) -> str:
        current_sub_tokens =[]
        out_string =''''''
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens ) + token
                current_sub_tokens =[]
            else:
                current_sub_tokens.append(token )
        out_string += self.sp_model.decode(current_sub_tokens )
        return out_string.strip()
def __lowercase ( self , __lowercase , __lowercase = None , __lowercase = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__lowercase , token_ids_a=__lowercase , already_has_special_tokens=__lowercase )
lowerCamelCase : int =[1] * len(self.prefix_tokens )
lowerCamelCase : List[str] =[1] * len(self.suffix_tokens )
if token_ids_a is None:
return prefix_ones + ([0] * len(__lowercase )) + suffix_ones
return prefix_ones + ([0] * len(__lowercase )) + ([0] * len(__lowercase )) + suffix_ones
def __lowercase ( self , __lowercase , __lowercase = None ) -> List[int]:
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
    def __lowercase ( self ) -> Dict:
        vocab ={self.convert_ids_to_tokens(i ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
    def __getstate__( self ) -> Dict:
        state =self.__dict__.copy()
        state['''sp_model'''] =None
        return state
    def __setstate__( self , d ) -> None:
        self.__dict__ =d
        # for backward compatibility
        if not hasattr(self , '''sp_model_kwargs''' ):
            self.sp_model_kwargs ={}
        self.sp_model =load_spm(self.spm_file , self.sp_model_kwargs )
    def __lowercase ( self , save_directory , filename_prefix = None ) -> Tuple[str]:
        save_dir =Path(save_directory )
        if not save_dir.is_dir():
            raise OSError(F"{save_directory} should be a directory" )
        vocab_save_path =save_dir / (
            (filename_prefix + '''-''' if filename_prefix else '''''') + self.vocab_files_names['''vocab_file''']
        )
        spm_save_path =save_dir / (
            (filename_prefix + '''-''' if filename_prefix else '''''') + self.vocab_files_names['''spm_file''']
        )
        save_json(self.encoder , vocab_save_path )
        if os.path.abspath(self.spm_file ) != os.path.abspath(spm_save_path ) and os.path.isfile(self.spm_file ):
            copyfile(self.spm_file , spm_save_path )
        elif not os.path.isfile(self.spm_file ):
            with open(spm_save_path , '''wb''' ) as fi:
                content_spiece_model =self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model )
        return (str(vocab_save_path ), str(spm_save_path ))
def __lowercase ( self , __lowercase , __lowercase = "en" , __lowercase = None , __lowercase = "ro" , **__lowercase , ) -> BatchEncoding:
lowerCamelCase : Union[str, Any] =src_lang
lowerCamelCase : Optional[int] =tgt_lang
self.set_src_lang_special_tokens(self.src_lang )
return super().prepare_seqaseq_batch(__lowercase , __lowercase , **__lowercase )
def __lowercase ( self , __lowercase , __lowercase , __lowercase , **__lowercase ) -> Optional[int]:
if src_lang is None or tgt_lang is None:
raise ValueError('''Translation requires a `src_lang` and a `tgt_lang` for this model''' )
lowerCamelCase : List[Any] =src_lang
lowerCamelCase : str =self(__lowercase , add_special_tokens=__lowercase , **__lowercase )
lowerCamelCase : Tuple =self.get_lang_id(__lowercase )
lowerCamelCase : Optional[int] =tgt_lang_id
return inputs
def __lowercase ( self ) -> List[str]:
self.set_src_lang_special_tokens(self.src_lang )
def __lowercase ( self ) -> List[str]:
self.set_tgt_lang_special_tokens(self.tgt_lang )
def __lowercase ( self , __lowercase ) -> None:
lowerCamelCase : Union[str, Any] =self.get_lang_token(__lowercase )
lowerCamelCase : List[Any] =self.lang_token_to_id[lang_token]
lowerCamelCase : Optional[Any] =[self.cur_lang_id]
lowerCamelCase : Union[str, Any] =[self.eos_token_id]
def __lowercase ( self , __lowercase ) -> None:
lowerCamelCase : Tuple =self.get_lang_token(__lowercase )
lowerCamelCase : Tuple =self.lang_token_to_id[lang_token]
lowerCamelCase : List[Any] =[self.cur_lang_id]
lowerCamelCase : Tuple =[self.eos_token_id]
def __lowercase ( self , __lowercase ) -> str:
return self.lang_code_to_token[lang]
def __lowercase ( self , __lowercase ) -> int:
lowerCamelCase : List[str] =self.get_lang_token(__lowercase )
return self.lang_token_to_id[lang_token]
def load_spm ( path , sp_model_kwargs ) -> sentencepiece.SentencePieceProcessor:
    spm =sentencepiece.SentencePieceProcessor(**sp_model_kwargs )
    spm.Load(str(path ) )
    return spm
def load_json ( path ) -> Union[Dict, List]:
    with open(path , '''r''' ) as f:
        return json.load(f )
def save_json ( data , path ) -> None:
    with open(path , '''w''' ) as f:
        json.dump(data , f , indent=2 )
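# Minimal usage sketch (the tokenizer class name above is the dataset's
# obfuscated one; 'facebook/m2m100_418M' is taken from the vocab map above,
# and return_tensors='pt' assumes torch is installed):
if __name__ == "__main__":
    tokenizer = snake_case_.from_pretrained('facebook/m2m100_418M', src_lang='en', tgt_lang='fr')
    encoded = tokenizer('Hello world', return_tensors='pt')
    print(encoded['input_ids'])  # begins with the __en__ language token id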
| 262
| 1
|
'''simple docstring'''
SCREAMING_SNAKE_CASE = '0.21.0'
from .accelerator import Accelerator
from .big_modeling import (
cpu_offload,
cpu_offload_with_hook,
disk_offload,
dispatch_model,
init_empty_weights,
init_on_device,
load_checkpoint_and_dispatch,
)
from .data_loader import skip_first_batches
from .launchers import debug_launcher, notebook_launcher
from .state import PartialState
from .utils import (
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
FullyShardedDataParallelPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
find_executable_batch_size,
infer_auto_device_map,
is_rich_available,
load_checkpoint_in_model,
synchronize_rng_states,
)
if is_rich_available():
from .utils import rich
| 94
|
'''simple docstring'''
import json
import os
import unittest
from transformers import BatchEncoding, MvpTokenizer, MvpTokenizerFast
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin, filter_roberta_detectors
@require_tokenizers
class UpperCAmelCase_ ( __A , unittest.TestCase ):
"""simple docstring"""
UpperCamelCase_ = MvpTokenizer
UpperCamelCase_ = MvpTokenizerFast
UpperCamelCase_ = True
UpperCamelCase_ = filter_roberta_detectors
def A__ ( self : Union[str, Any] ) -> List[str]:
'''simple docstring'''
super().setUp()
lowercase : Dict =[
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''\u0120''',
'''\u0120l''',
'''\u0120n''',
'''\u0120lo''',
'''\u0120low''',
'''er''',
'''\u0120lowest''',
'''\u0120newer''',
'''\u0120wider''',
'''<unk>''',
]
lowercase : Tuple =dict(zip(UpperCAmelCase , range(len(UpperCAmelCase ) ) ) )
lowercase : Optional[Any] =['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', '''''']
lowercase : List[Any] ={'''unk_token''': '''<unk>'''}
lowercase : Dict =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
lowercase : Dict =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(UpperCAmelCase ) + '''\n''' )
with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write('''\n'''.join(UpperCAmelCase ) )
def A__ ( self : Union[str, Any] , **UpperCAmelCase : Dict ) -> Tuple:
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname , **UpperCAmelCase )
def A__ ( self : List[str] , **UpperCAmelCase : Any ) -> Tuple:
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **UpperCAmelCase )
def A__ ( self : Tuple , UpperCAmelCase : Any ) -> Union[str, Any]:
'''simple docstring'''
return "lower newer", "lower newer"
@cached_property
def A__ ( self : Tuple ) -> List[Any]:
'''simple docstring'''
return MvpTokenizer.from_pretrained('''RUCAIBox/mvp''' )
@cached_property
def A__ ( self : Any ) -> int:
'''simple docstring'''
return MvpTokenizerFast.from_pretrained('''RUCAIBox/mvp''' )
@require_torch
def A__ ( self : Union[str, Any] ) -> Optional[int]:
'''simple docstring'''
lowercase : Optional[int] =['''A long paragraph for summarization.''', '''Another paragraph for summarization.''']
lowercase : List[str] =[0, 250, 251, 1_7818, 13, 3_9186, 1938, 4, 2]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
lowercase : Union[str, Any] =tokenizer(UpperCAmelCase , max_length=len(UpperCAmelCase ) , padding=UpperCAmelCase , return_tensors='''pt''' )
self.assertIsInstance(UpperCAmelCase , UpperCAmelCase )
self.assertEqual((2, 9) , batch.input_ids.shape )
self.assertEqual((2, 9) , batch.attention_mask.shape )
lowercase : Union[str, Any] =batch.input_ids.tolist()[0]
self.assertListEqual(UpperCAmelCase , UpperCAmelCase )
# Test that special tokens are reset
@require_torch
def A__ ( self : Tuple ) -> Any:
'''simple docstring'''
lowercase : Any =['''A long paragraph for summarization.''', '''Another paragraph for summarization.''']
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
lowercase : Dict =tokenizer(UpperCAmelCase , padding=UpperCAmelCase , return_tensors='''pt''' )
# check if input_ids are returned and no labels
self.assertIn('''input_ids''' , UpperCAmelCase )
self.assertIn('''attention_mask''' , UpperCAmelCase )
self.assertNotIn('''labels''' , UpperCAmelCase )
self.assertNotIn('''decoder_attention_mask''' , UpperCAmelCase )
@require_torch
def A__ ( self : Dict ) -> int:
'''simple docstring'''
lowercase : int =[
'''Summary of the text.''',
'''Another summary.''',
]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
lowercase : Optional[Any] =tokenizer(text_target=UpperCAmelCase , max_length=32 , padding='''max_length''' , return_tensors='''pt''' )
self.assertEqual(32 , targets['''input_ids'''].shape[1] )
@require_torch
def A__ ( self : Union[str, Any] ) -> Any:
'''simple docstring'''
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
lowercase : Union[str, Any] =tokenizer(
['''I am a small frog''' * 1024, '''I am a small frog'''] , padding=UpperCAmelCase , truncation=UpperCAmelCase , return_tensors='''pt''' )
self.assertIsInstance(UpperCAmelCase , UpperCAmelCase )
self.assertEqual(batch.input_ids.shape , (2, 1024) )
@require_torch
def A__ ( self : str ) -> List[Any]:
'''simple docstring'''
lowercase : Optional[Any] =['''A long paragraph for summarization.''']
lowercase : List[Any] =[
'''Summary of the text.''',
]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
lowercase : List[str] =tokenizer(UpperCAmelCase , text_target=UpperCAmelCase , return_tensors='''pt''' )
lowercase : Optional[int] =inputs['''input_ids''']
lowercase : Optional[Any] =inputs['''labels''']
self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item() )
self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item() )
self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item() )
self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item() )
def A__ ( self : List[Any] ) -> List[Any]:
'''simple docstring'''
pass
def A__ ( self : List[str] ) -> Tuple:
'''simple docstring'''
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})' ):
lowercase : Union[str, Any] =self.rust_tokenizer_class.from_pretrained(UpperCAmelCase , **UpperCAmelCase )
lowercase : Tuple =self.tokenizer_class.from_pretrained(UpperCAmelCase , **UpperCAmelCase )
lowercase : Optional[Any] ='''A, <mask> AllenNLP sentence.'''
lowercase : int =tokenizer_r.encode_plus(UpperCAmelCase , add_special_tokens=UpperCAmelCase , return_token_type_ids=UpperCAmelCase )
lowercase : List[Any] =tokenizer_p.encode_plus(UpperCAmelCase , add_special_tokens=UpperCAmelCase , return_token_type_ids=UpperCAmelCase )
# token_type_ids should put 0 everywhere
self.assertEqual(sum(tokens_r['''token_type_ids'''] ) , sum(tokens_p['''token_type_ids'''] ) )
# attention_mask should put 1 everywhere, so sum over length should be 1
self.assertEqual(
sum(tokens_r['''attention_mask'''] ) / len(tokens_r['''attention_mask'''] ) , sum(tokens_p['''attention_mask'''] ) / len(tokens_p['''attention_mask'''] ) , )
lowercase : Any =tokenizer_r.convert_ids_to_tokens(tokens_r['''input_ids'''] )
lowercase : str =tokenizer_p.convert_ids_to_tokens(tokens_p['''input_ids'''] )
                # Rust correctly handles the space before the mask while Python doesn't
self.assertSequenceEqual(tokens_p['''input_ids'''] , [0, 250, 6, 5_0264, 3823, 487, 2_1992, 3645, 4, 2] )
self.assertSequenceEqual(tokens_r['''input_ids'''] , [0, 250, 6, 5_0264, 3823, 487, 2_1992, 3645, 4, 2] )
self.assertSequenceEqual(
UpperCAmelCase , ['''<s>''', '''A''', ''',''', '''<mask>''', '''ĠAllen''', '''N''', '''LP''', '''Ġsentence''', '''.''', '''</s>'''] )
self.assertSequenceEqual(
UpperCAmelCase , ['''<s>''', '''A''', ''',''', '''<mask>''', '''ĠAllen''', '''N''', '''LP''', '''Ġsentence''', '''.''', '''</s>'''] )
| 94
| 1
|
from typing import Optional, Tuple, Union
import flax
import flax.linen as nn
import jax
import jax.numpy as jnp
from flax.core.frozen_dict import FrozenDict
from ..configuration_utils import ConfigMixin, flax_register_to_config
from ..utils import BaseOutput
from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps
from .modeling_flax_utils import FlaxModelMixin
from .unet_ad_blocks_flax import (
FlaxCrossAttnDownBlockaD,
FlaxCrossAttnUpBlockaD,
FlaxDownBlockaD,
FlaxUNetMidBlockaDCrossAttn,
FlaxUpBlockaD,
)
@flax.struct.dataclass
class UpperCamelCase_ ( UpperCAmelCase__ ):
'''simple docstring'''
UpperCAmelCase__ = 42
@flax_register_to_config
class UpperCamelCase_ ( nn.Module , UpperCAmelCase__ , UpperCAmelCase__ ):
'''simple docstring'''
UpperCAmelCase__ = 32
UpperCAmelCase__ = 4
UpperCAmelCase__ = 4
UpperCAmelCase__ = (
'''CrossAttnDownBlock2D''',
'''CrossAttnDownBlock2D''',
'''CrossAttnDownBlock2D''',
'''DownBlock2D''',
)
UpperCAmelCase__ = ('''UpBlock2D''', '''CrossAttnUpBlock2D''', '''CrossAttnUpBlock2D''', '''CrossAttnUpBlock2D''')
UpperCAmelCase__ = False
UpperCAmelCase__ = (320, 640, 1280, 1280)
UpperCAmelCase__ = 2
UpperCAmelCase__ = 8
UpperCAmelCase__ = None
UpperCAmelCase__ = 1280
UpperCAmelCase__ = 0.0
UpperCAmelCase__ = False
UpperCAmelCase__ = jnp.floataa
UpperCAmelCase__ = True
UpperCAmelCase__ = 0
UpperCAmelCase__ = False
    def init_weights(self, rng: jax.random.KeyArray) -> FrozenDict:
        # init input tensors
        sample_shape = (1, self.in_channels, self.sample_size, self.sample_size)
        sample = jnp.zeros(sample_shape, dtype=jnp.float32)
        timesteps = jnp.ones((1,), dtype=jnp.int32)
        encoder_hidden_states = jnp.zeros((1, 1, self.cross_attention_dim), dtype=jnp.float32)

        params_rng, dropout_rng = jax.random.split(rng)
        rngs = {"params": params_rng, "dropout": dropout_rng}

        return self.init(rngs, sample, timesteps, encoder_hidden_states)["params"]
    def setup(self) -> None:
'''simple docstring'''
A__ = self.block_out_channels
A__ = block_out_channels[0] * 4
if self.num_attention_heads is not None:
raise ValueError(
'''At the moment it is not possible to define the number of attention heads via `num_attention_heads` because of a naming issue as described in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131. Passing `num_attention_heads` will only be supported in diffusers v0.19.''')
# If `num_attention_heads` is not defined (which is the case for most models)
# it will default to `attention_head_dim`. This looks weird upon first reading it and it is.
# The reason for this behavior is to correct for incorrectly named variables that were introduced
# when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131
# Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking
# which is why we correct for the naming here.
A__ = self.num_attention_heads or self.attention_head_dim
# input
A__ = nn.Conv(
block_out_channels[0] , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
# time
A__ = FlaxTimesteps(
block_out_channels[0] , flip_sin_to_cos=self.flip_sin_to_cos , freq_shift=self.config.freq_shift)
A__ = FlaxTimestepEmbedding(__lowerCAmelCase , dtype=self.dtype)
A__ = self.only_cross_attention
if isinstance(__lowerCAmelCase , __lowerCAmelCase):
A__ = (only_cross_attention,) * len(self.down_block_types)
if isinstance(__lowerCAmelCase , __lowerCAmelCase):
A__ = (num_attention_heads,) * len(self.down_block_types)
# down
A__ = []
A__ = block_out_channels[0]
for i, down_block_type in enumerate(self.down_block_types):
A__ = output_channel
A__ = block_out_channels[i]
A__ = i == len(__lowerCAmelCase) - 1
if down_block_type == "CrossAttnDownBlock2D":
A__ = FlaxCrossAttnDownBlockaD(
in_channels=__lowerCAmelCase , out_channels=__lowerCAmelCase , dropout=self.dropout , num_layers=self.layers_per_block , num_attention_heads=num_attention_heads[i] , add_downsample=not is_final_block , use_linear_projection=self.use_linear_projection , only_cross_attention=only_cross_attention[i] , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
else:
A__ = FlaxDownBlockaD(
in_channels=__lowerCAmelCase , out_channels=__lowerCAmelCase , dropout=self.dropout , num_layers=self.layers_per_block , add_downsample=not is_final_block , dtype=self.dtype , )
down_blocks.append(__lowerCAmelCase)
A__ = down_blocks
# mid
A__ = FlaxUNetMidBlockaDCrossAttn(
in_channels=block_out_channels[-1] , dropout=self.dropout , num_attention_heads=num_attention_heads[-1] , use_linear_projection=self.use_linear_projection , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
# up
A__ = []
A__ = list(reversed(__lowerCAmelCase))
A__ = list(reversed(__lowerCAmelCase))
A__ = list(reversed(__lowerCAmelCase))
A__ = reversed_block_out_channels[0]
for i, up_block_type in enumerate(self.up_block_types):
A__ = output_channel
A__ = reversed_block_out_channels[i]
A__ = reversed_block_out_channels[min(i + 1 , len(__lowerCAmelCase) - 1)]
A__ = i == len(__lowerCAmelCase) - 1
if up_block_type == "CrossAttnUpBlock2D":
A__ = FlaxCrossAttnUpBlockaD(
in_channels=__lowerCAmelCase , out_channels=__lowerCAmelCase , prev_output_channel=__lowerCAmelCase , num_layers=self.layers_per_block + 1 , num_attention_heads=reversed_num_attention_heads[i] , add_upsample=not is_final_block , dropout=self.dropout , use_linear_projection=self.use_linear_projection , only_cross_attention=only_cross_attention[i] , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
else:
A__ = FlaxUpBlockaD(
in_channels=__lowerCAmelCase , out_channels=__lowerCAmelCase , prev_output_channel=__lowerCAmelCase , num_layers=self.layers_per_block + 1 , add_upsample=not is_final_block , dropout=self.dropout , dtype=self.dtype , )
up_blocks.append(__lowerCAmelCase)
A__ = output_channel
A__ = up_blocks
# out
A__ = nn.GroupNorm(num_groups=32 , epsilon=1e-5)
A__ = nn.Conv(
self.out_channels , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
def __call__( self : Dict , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Tuple=None , UpperCAmelCase__ : List[Any]=None , UpperCAmelCase__ : List[str] = True , UpperCAmelCase__ : str = False , ) ->Optional[int]:
'''simple docstring'''
if not isinstance(__lowerCAmelCase , jnp.ndarray):
A__ = jnp.array([timesteps] , dtype=jnp.intaa)
elif isinstance(__lowerCAmelCase , jnp.ndarray) and len(timesteps.shape) == 0:
A__ = timesteps.astype(dtype=jnp.floataa)
A__ = jnp.expand_dims(__lowerCAmelCase , 0)
A__ = self.time_proj(__lowerCAmelCase)
A__ = self.time_embedding(__lowerCAmelCase)
# 2. pre-process
A__ = jnp.transpose(__lowerCAmelCase , (0, 2, 3, 1))
A__ = self.conv_in(__lowerCAmelCase)
# 3. down
A__ = (sample,)
for down_block in self.down_blocks:
if isinstance(__lowerCAmelCase , __lowerCAmelCase):
A__ , A__ = down_block(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , deterministic=not train)
else:
A__ , A__ = down_block(__lowerCAmelCase , __lowerCAmelCase , deterministic=not train)
down_block_res_samples += res_samples
if down_block_additional_residuals is not None:
A__ = ()
for down_block_res_sample, down_block_additional_residual in zip(
__lowerCAmelCase , __lowerCAmelCase):
down_block_res_sample += down_block_additional_residual
new_down_block_res_samples += (down_block_res_sample,)
A__ = new_down_block_res_samples
# 4. mid
A__ = self.mid_block(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , deterministic=not train)
if mid_block_additional_residual is not None:
sample += mid_block_additional_residual
# 5. up
for up_block in self.up_blocks:
A__ = down_block_res_samples[-(self.layers_per_block + 1) :]
A__ = down_block_res_samples[: -(self.layers_per_block + 1)]
if isinstance(__lowerCAmelCase , __lowerCAmelCase):
A__ = up_block(
__lowerCAmelCase , temb=__lowerCAmelCase , encoder_hidden_states=__lowerCAmelCase , res_hidden_states_tuple=__lowerCAmelCase , deterministic=not train , )
else:
A__ = up_block(__lowerCAmelCase , temb=__lowerCAmelCase , res_hidden_states_tuple=__lowerCAmelCase , deterministic=not train)
# 6. post-process
A__ = self.conv_norm_out(__lowerCAmelCase)
A__ = nn.silu(__lowerCAmelCase)
A__ = self.conv_out(__lowerCAmelCase)
A__ = jnp.transpose(__lowerCAmelCase , (0, 3, 1, 2))
if not return_dict:
return (sample,)
return FlaxUNetaDConditionOutput(sample=__lowerCAmelCase)
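# A minimal usage sketch (hypothetical, not part of the original file). Assuming
# the class above corresponds to diffusers' FlaxUNet2DConditionModel, parameters
# are created with init_weights and then applied functionally:
#
#     import jax
#     import jax.numpy as jnp
#
#     unet = FlaxUNet2DConditionModel(sample_size=32, cross_attention_dim=768)
#     params = unet.init_weights(rng=jax.random.PRNGKey(0))
#     sample = jnp.zeros((1, 4, 32, 32))            # (batch, in_channels, h, w)
#     timestep = jnp.array([10])
#     encoder_hidden_states = jnp.zeros((1, 77, 768))
#     out = unet.apply({"params": params}, sample, timestep, encoder_hidden_states)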
| 707
|
import random
import unittest
import torch
from diffusers import IFInpaintingPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class UpperCamelCase_ ( UpperCAmelCase__ , UpperCAmelCase__ , unittest.TestCase ):
'''simple docstring'''
UpperCAmelCase__ = IFInpaintingPipeline
UpperCAmelCase__ = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {'''width''', '''height'''}
UpperCAmelCase__ = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
UpperCAmelCase__ = PipelineTesterMixin.required_optional_params - {'''latents'''}
def SCREAMING_SNAKE_CASE ( self : Any) ->Dict:
'''simple docstring'''
return self._get_dummy_components()
def SCREAMING_SNAKE_CASE ( self : int , UpperCAmelCase__ : Dict , UpperCAmelCase__ : int=0) ->int:
'''simple docstring'''
if str(UpperCAmelCase__).startswith('''mps'''):
A__ = torch.manual_seed(UpperCAmelCase__)
else:
A__ = torch.Generator(device=UpperCAmelCase__).manual_seed(UpperCAmelCase__)
A__ = floats_tensor((1, 3, 32, 32) , rng=random.Random(UpperCAmelCase__)).to(UpperCAmelCase__)
A__ = floats_tensor((1, 3, 32, 32) , rng=random.Random(UpperCAmelCase__)).to(UpperCAmelCase__)
A__ = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''image''': image,
'''mask_image''': mask_image,
'''generator''': generator,
'''num_inference_steps''': 2,
'''output_type''': '''numpy''',
}
return inputs
@unittest.skipIf(
torch_device != '''cuda''' or not is_xformers_available() , reason='''XFormers attention is only available with CUDA and `xformers` installed''' , )
def SCREAMING_SNAKE_CASE ( self : List[str]) ->List[Any]:
'''simple docstring'''
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)
def SCREAMING_SNAKE_CASE ( self : Dict) ->int:
'''simple docstring'''
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != '''cuda''' , reason='''float16 requires CUDA''')
def SCREAMING_SNAKE_CASE ( self : Optional[Any]) ->List[str]:
'''simple docstring'''
super().test_save_load_floataa(expected_max_diff=1e-1)
def SCREAMING_SNAKE_CASE ( self : int) ->List[Any]:
'''simple docstring'''
self._test_attention_slicing_forward_pass(expected_max_diff=1e-2)
def SCREAMING_SNAKE_CASE ( self : Union[str, Any]) ->Any:
'''simple docstring'''
self._test_save_load_local()
def SCREAMING_SNAKE_CASE ( self : int) ->Union[str, Any]:
'''simple docstring'''
self._test_inference_batch_single_identical(
expected_max_diff=1e-2 , )
| 177
| 0
|
'''simple docstring'''
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, List, Mapping, Optional
from packaging import version
if TYPE_CHECKING:
from ... import PreTrainedTokenizer, TensorType
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import is_torch_available, logging
logger = logging.get_logger(__name__)
BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"bigscience/bloom": "https://huggingface.co/bigscience/bloom/resolve/main/config.json",
"bigscience/bloom-560m": "https://huggingface.co/bigscience/bloom-560m/blob/main/config.json",
"bigscience/bloom-1b1": "https://huggingface.co/bigscience/bloom-1b1/blob/main/config.json",
"bigscience/bloom-1b7": "https://huggingface.co/bigscience/bloom-1b7/blob/main/config.json",
"bigscience/bloom-3b": "https://huggingface.co/bigscience/bloom-3b/blob/main/config.json",
"bigscience/bloom-7b1": "https://huggingface.co/bigscience/bloom-7b1/blob/main/config.json",
}
class BloomConfig(PretrainedConfig):
    model_type = "bloom"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "num_hidden_layers": "n_layer",
        "num_attention_heads": "n_head",
    }
    def __init__(
        self,
        vocab_size=250880,
        hidden_size=64,
        n_layer=2,
        n_head=8,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        use_cache=True,
        bos_token_id=1,
        eos_token_id=2,
        apply_residual_connection_post_layernorm=False,
        hidden_dropout=0.0,
        attention_dropout=0.0,
        pretraining_tp=1,
        slow_but_exact=False,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        # Backward compatibility with n_embed kwarg
        n_embed = kwargs.pop("n_embed", None)
        self.hidden_size = hidden_size if n_embed is None else n_embed
        self.n_layer = n_layer
        self.n_head = n_head
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.pretraining_tp = pretraining_tp
        self.apply_residual_connection_post_layernorm = apply_residual_connection_post_layernorm
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.slow_but_exact = slow_but_exact
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
class BloomOnnxConfig(OnnxConfigWithPast):
    torch_onnx_minimum_version = version.parse("1.12")

    def __init__(
        self,
        config: PretrainedConfig,
        task: str = "default",
        patching_specs: List[PatchingSpec] = None,
        use_past: bool = False,
    ):
        super().__init__(config, task=task, patching_specs=patching_specs, use_past=use_past)
        if not getattr(self._config, "pad_token_id", None):
            # TODO: how to do that better?
            self._config.pad_token_id = 0
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}})
        if self.use_past:
            # BLOOM stores values on dynamic axis 2. For more details see: https://github.com/huggingface/transformers/pull/18344
            self.fill_with_past_key_values_(common_inputs, direction="inputs", inverted_values_shape=True)
            common_inputs["attention_mask"] = {0: "batch", 1: "past_sequence + sequence"}
        else:
            common_inputs["attention_mask"] = {0: "batch", 1: "sequence"}
        return common_inputs
    @property
    def num_layers(self) -> int:
        return self._config.n_layer

    @property
    def num_attention_heads(self) -> int:
        return self._config.n_head

    @property
    def atol_for_validation(self) -> float:
        return 1e-3
    def generate_dummy_inputs(
        self,
        tokenizer: "PreTrainedTokenizer",
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional["TensorType"] = None,
    ) -> Mapping[str, Any]:
        common_inputs = super(OnnxConfigWithPast, self).generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )
        # We need to order the inputs in the way they appear in forward()
        ordered_inputs = OrderedDict({"input_ids": common_inputs["input_ids"]})

        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch

                batch, seqlen = common_inputs["input_ids"].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                head_dim = self._config.hidden_size // self.num_attention_heads
                past_key_shape = (
                    batch * self.num_attention_heads,
                    head_dim,
                    past_key_values_length,
                )
                past_value_shape = (
                    batch * self.num_attention_heads,
                    past_key_values_length,
                    head_dim,
                )
                ordered_inputs["past_key_values"] = [
                    (torch.zeros(past_key_shape), torch.zeros(past_value_shape)) for _ in range(self.num_layers)
                ]
        ordered_inputs["attention_mask"] = common_inputs["attention_mask"]
        if self.use_past:
            mask_dtype = ordered_inputs["attention_mask"].dtype
            ordered_inputs["attention_mask"] = torch.cat(
                [ordered_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )
        return ordered_inputs
    @property
    def default_onnx_opset(self) -> int:
        return 13
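# A small usage sketch (hypothetical, not part of the original module; it relies
# only on the two classes defined above and must run from inside the transformers
# package because of the relative imports):
#
#     config = BloomConfig(vocab_size=1000, hidden_size=64, n_layer=2, n_head=8)
#     onnx_config = BloomOnnxConfig(config)
#     onnx_config.inputs              # OrderedDict of input_ids / attention_mask axes
#     onnx_config.num_layers          # 2, read from config.n_layer via the property
#     onnx_config.default_onnx_opset  # 13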
| 48
|
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
import diffusers
from diffusers import (
AutoencoderKL,
EulerDiscreteScheduler,
StableDiffusionLatentUpscalePipeline,
StableDiffusionPipeline,
UNetaDConditionModel,
)
from diffusers.schedulers import KarrasDiffusionSchedulers
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
def check_same_shape(tensor_list):
    shapes = [tensor.shape for tensor in tensor_list]
    return all(shape == shapes[0] for shape in shapes[1:])
class UpperCamelCase__ ( __lowercase ,__lowercase ,__lowercase ,unittest.TestCase ):
_SCREAMING_SNAKE_CASE : Any = StableDiffusionLatentUpscalePipeline
_SCREAMING_SNAKE_CASE : Dict = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {
"height",
"width",
"cross_attention_kwargs",
"negative_prompt_embeds",
"prompt_embeds",
}
_SCREAMING_SNAKE_CASE : Union[str, Any] = PipelineTesterMixin.required_optional_params - {"num_images_per_prompt"}
_SCREAMING_SNAKE_CASE : Any = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
_SCREAMING_SNAKE_CASE : Union[str, Any] = frozenset(
[] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
_SCREAMING_SNAKE_CASE : Optional[int] = frozenset([] )
_SCREAMING_SNAKE_CASE : Optional[int] = True
@property
def lowerCAmelCase (self : Optional[int] ):
__a : Union[str, Any] = 1
__a : Dict = 4
__a : int = (1_6, 1_6)
__a : Tuple = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(snake_case_ )
return image
def lowerCAmelCase (self : int ):
torch.manual_seed(0 )
__a : Dict = UNetaDConditionModel(
act_fn='''gelu''' , attention_head_dim=8 , norm_num_groups=snake_case_ , block_out_channels=[3_2, 3_2, 6_4, 6_4] , time_cond_proj_dim=1_6_0 , conv_in_kernel=1 , conv_out_kernel=1 , cross_attention_dim=3_2 , down_block_types=(
'''KDownBlock2D''',
'''KCrossAttnDownBlock2D''',
'''KCrossAttnDownBlock2D''',
'''KCrossAttnDownBlock2D''',
) , in_channels=8 , mid_block_type=snake_case_ , only_cross_attention=snake_case_ , out_channels=5 , resnet_time_scale_shift='''scale_shift''' , time_embedding_type='''fourier''' , timestep_post_act='''gelu''' , up_block_types=('''KCrossAttnUpBlock2D''', '''KCrossAttnUpBlock2D''', '''KCrossAttnUpBlock2D''', '''KUpBlock2D''') , )
__a : Optional[int] = AutoencoderKL(
block_out_channels=[3_2, 3_2, 6_4, 6_4] , in_channels=3 , out_channels=3 , down_block_types=[
'''DownEncoderBlock2D''',
'''DownEncoderBlock2D''',
'''DownEncoderBlock2D''',
'''DownEncoderBlock2D''',
] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D''', '''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , )
__a : Dict = EulerDiscreteScheduler(prediction_type='''sample''' )
__a : List[Any] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=3_2 , intermediate_size=3_7 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , hidden_act='''quick_gelu''' , projection_dim=5_1_2 , )
__a : int = CLIPTextModel(snake_case_ )
__a : Any = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
__a : Tuple = {
'''unet''': model.eval(),
'''vae''': vae.eval(),
'''scheduler''': scheduler,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
}
return components
def lowerCAmelCase (self : List[str] , snake_case_ : Tuple , snake_case_ : List[Any]=0 ):
if str(snake_case_ ).startswith('''mps''' ):
__a : Any = torch.manual_seed(snake_case_ )
else:
__a : Optional[Any] = torch.Generator(device=snake_case_ ).manual_seed(snake_case_ )
__a : Tuple = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''image''': self.dummy_image.cpu(),
'''generator''': generator,
'''num_inference_steps''': 2,
'''output_type''': '''numpy''',
}
return inputs
def lowerCAmelCase (self : Tuple ):
__a : Optional[int] = '''cpu'''
__a : Union[str, Any] = self.get_dummy_components()
__a : Any = self.pipeline_class(**snake_case_ )
pipe.to(snake_case_ )
pipe.set_progress_bar_config(disable=snake_case_ )
__a : int = self.get_dummy_inputs(snake_case_ )
__a : Dict = pipe(**snake_case_ ).images
__a : Optional[int] = image[0, -3:, -3:, -1]
self.assertEqual(image.shape , (1, 2_5_6, 2_5_6, 3) )
__a : List[str] = np.array(
[0.4722_2412, 0.4192_1633, 0.4471_7434, 0.4687_4192, 0.4258_8258, 0.4615_0726, 0.467_7534, 0.4558_3832, 0.4857_9055] )
__a : int = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(snake_case_ , 1E-3 )
def lowerCAmelCase (self : Tuple ):
super().test_attention_slicing_forward_pass(expected_max_diff=7E-3 )
def lowerCAmelCase (self : Any ):
super().test_cpu_offload_forward_pass(expected_max_diff=3E-3 )
def lowerCAmelCase (self : Optional[Any] ):
super().test_dict_tuple_outputs_equivalent(expected_max_difference=3E-3 )
def lowerCAmelCase (self : Optional[Any] ):
super().test_inference_batch_single_identical(expected_max_diff=7E-3 )
def lowerCAmelCase (self : List[str] ):
super().test_pt_np_pil_outputs_equivalent(expected_max_diff=3E-3 )
def lowerCAmelCase (self : Tuple ):
super().test_save_load_local(expected_max_difference=3E-3 )
def lowerCAmelCase (self : Optional[int] ):
super().test_save_load_optional_components(expected_max_difference=3E-3 )
def lowerCAmelCase (self : Union[str, Any] ):
__a : List[Any] = [
'''DDIMScheduler''',
'''DDPMScheduler''',
'''PNDMScheduler''',
'''HeunDiscreteScheduler''',
'''EulerAncestralDiscreteScheduler''',
'''KDPM2DiscreteScheduler''',
'''KDPM2AncestralDiscreteScheduler''',
'''DPMSolverSDEScheduler''',
]
__a : List[str] = self.get_dummy_components()
__a : List[Any] = self.pipeline_class(**snake_case_ )
# make sure that PNDM does not need warm-up
pipe.scheduler.register_to_config(skip_prk_steps=snake_case_ )
pipe.to(snake_case_ )
pipe.set_progress_bar_config(disable=snake_case_ )
__a : Optional[Any] = self.get_dummy_inputs(snake_case_ )
__a : List[Any] = 2
__a : str = []
for scheduler_enum in KarrasDiffusionSchedulers:
if scheduler_enum.name in skip_schedulers:
                # schedulers without a sigma schedule are not supported here
continue
__a : Union[str, Any] = getattr(snake_case_ , scheduler_enum.name )
__a : Any = scheduler_cls.from_config(pipe.scheduler.config )
__a : Any = pipe(**snake_case_ )[0]
outputs.append(snake_case_ )
assert check_same_shape(snake_case_ )
@require_torch_gpu
@slow
class UpperCamelCase__ ( unittest.TestCase ):
def lowerCAmelCase (self : Any ):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCAmelCase (self : Union[str, Any] ):
__a : Union[str, Any] = torch.manual_seed(3_3 )
__a : List[Any] = StableDiffusionPipeline.from_pretrained('''CompVis/stable-diffusion-v1-4''' , torch_dtype=torch.floataa )
pipe.to('''cuda''' )
__a : str = StableDiffusionLatentUpscalePipeline.from_pretrained(
'''stabilityai/sd-x2-latent-upscaler''' , torch_dtype=torch.floataa )
upscaler.to('''cuda''' )
__a : int = '''a photo of an astronaut high resolution, unreal engine, ultra realistic'''
__a : Dict = pipe(snake_case_ , generator=snake_case_ , output_type='''latent''' ).images
__a : Any = upscaler(
prompt=snake_case_ , image=snake_case_ , num_inference_steps=2_0 , guidance_scale=0 , generator=snake_case_ , output_type='''np''' , ).images[0]
__a : int = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/astronaut_1024.npy''' )
assert np.abs((expected_image - image).mean() ) < 5E-2
def lowerCAmelCase (self : List[Any] ):
__a : int = torch.manual_seed(3_3 )
__a : Union[str, Any] = StableDiffusionLatentUpscalePipeline.from_pretrained(
'''stabilityai/sd-x2-latent-upscaler''' , torch_dtype=torch.floataa )
upscaler.to('''cuda''' )
__a : Optional[int] = '''the temple of fire by Ross Tran and Gerardo Dottori, oil on canvas'''
__a : List[Any] = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/fire_temple_512.png''' )
__a : Any = upscaler(
prompt=snake_case_ , image=snake_case_ , num_inference_steps=2_0 , guidance_scale=0 , generator=snake_case_ , output_type='''np''' , ).images[0]
__a : List[Any] = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/fire_temple_1024.npy''' )
assert np.abs((expected_image - image).max() ) < 5E-2
| 521
| 0
|
from typing import List, Optional, Tuple, Union
import torch
from ...schedulers import DDIMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class DDIMPipeline(DiffusionPipeline):
    def __init__(self, unet, scheduler):
        super().__init__()

        # make sure scheduler can always be converted to DDIM
        scheduler = DDIMScheduler.from_config(scheduler.config)

        self.register_modules(unet=unet, scheduler=scheduler)
@torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        eta: float = 0.0,
        num_inference_steps: int = 50,
        use_clipped_model_output: Optional[bool] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ) -> Union[ImagePipelineOutput, Tuple]:
        # Sample gaussian noise to begin loop
        if isinstance(self.unet.config.sample_size, int):
            image_shape = (
                batch_size,
                self.unet.config.in_channels,
                self.unet.config.sample_size,
                self.unet.config.sample_size,
            )
        else:
            image_shape = (batch_size, self.unet.config.in_channels, *self.unet.config.sample_size)

        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f"""You have passed a list of generators of length {len(generator)}, but requested an effective batch"""
                f""" size of {batch_size}. Make sure the batch size matches the length of the generators.""" )

        image = randn_tensor(image_shape, generator=generator, device=self.device, dtype=self.unet.dtype)

        # set step values
        self.scheduler.set_timesteps(num_inference_steps)

        for t in self.progress_bar(self.scheduler.timesteps):
            # 1. predict noise model_output
            model_output = self.unet(image, t).sample

            # 2. predict previous mean of image x_t-1 and add variance depending on eta
            # eta corresponds to η in paper and should be between [0, 1]
            # do x_t -> x_t-1
            image = self.scheduler.step(
                model_output, t, image, eta=eta, use_clipped_model_output=use_clipped_model_output, generator=generator
            ).prev_sample

        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
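# A minimal usage sketch (hypothetical, not part of the original file; it assumes
# a public unconditional checkpoint such as "google/ddpm-cifar10-32", whose
# scheduler config the constructor above converts to DDIM):
#
#     pipe = DDIMPipeline.from_pretrained("google/ddpm-cifar10-32")
#     image = pipe(batch_size=1, num_inference_steps=50, eta=0.0).images[0]
#     image.save("ddim_sample.png")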
| 700
|
from typing import Any
def mode(input_list: list) -> list[Any]:
    """Return the mode(s) of ``input_list`` in sorted order ([] for empty input)."""
    if not input_list:
        return []
    result = [input_list.count(value) for value in input_list]
    y = max(result)  # Gets the maximum count in the input list.
    # Gets values of modes
    return sorted({input_list[i] for i, value in enumerate(result) if value == y})
if __name__ == "__main__":
import doctest
doctest.testmod()
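
# A few quick sanity checks of mode() (illustrative additions, not in the
# original file): [2, 2, 3] counts to [2, 2, 1], so the single mode is 2; ties
# are all returned in sorted order; an empty list yields [].
if __name__ == "__main__":
    assert mode([2, 2, 3]) == [2]
    assert mode([1, 1, 2, 2, 3]) == [1, 2]
    assert mode([]) == []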
| 403
| 0
|
from typing import List, Optional, Union
import torch
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
lowercase = logging.get_logger(__name__) # pylint: disable=invalid-name
lowercase = '''
Examples:
```py
>>> import torch
>>> import numpy as np
>>> from diffusers import KandinskyV22PriorPipeline, KandinskyV22ControlnetPipeline
>>> from transformers import pipeline
>>> from diffusers.utils import load_image
>>> def make_hint(image, depth_estimator):
... image = depth_estimator(image)["depth"]
... image = np.array(image)
... image = image[:, :, None]
... image = np.concatenate([image, image, image], axis=2)
... detected_map = torch.from_numpy(image).float() / 255.0
... hint = detected_map.permute(2, 0, 1)
... return hint
>>> depth_estimator = pipeline("depth-estimation")
>>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(
... "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16
... )
>>> pipe_prior = pipe_prior.to("cuda")
>>> pipe = KandinskyV22ControlnetPipeline.from_pretrained(
... "kandinsky-community/kandinsky-2-2-controlnet-depth", torch_dtype=torch.float16
... )
>>> pipe = pipe.to("cuda")
>>> img = load_image(
... "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
... "/kandinsky/cat.png"
... ).resize((768, 768))
>>> hint = make_hint(img, depth_estimator).unsqueeze(0).half().to("cuda")
>>> prompt = "A robot, 4k photo"
>>> negative_prior_prompt = "lowres, text, error, cropped, worst quality, low quality, jpeg artifacts, ugly, duplicate, morbid, mutilated, out of frame, extra fingers, mutated hands, poorly drawn hands, poorly drawn face, mutation, deformed, blurry, dehydrated, bad anatomy, bad proportions, extra limbs, cloned face, disfigured, gross proportions, malformed limbs, missing arms, missing legs, extra arms, extra legs, fused fingers, too many fingers, long neck, username, watermark, signature"
>>> generator = torch.Generator(device="cuda").manual_seed(43)
>>> image_emb, zero_image_emb = pipe_prior(
... prompt=prompt, negative_prompt=negative_prior_prompt, generator=generator
... ).to_tuple()
>>> images = pipe(
... image_embeds=image_emb,
... negative_image_embeds=zero_image_emb,
... hint=hint,
... num_inference_steps=50,
... generator=generator,
... height=768,
... width=768,
... ).images
>>> images[0].save("robot_cat.png")
```
'''
def downscale_height_and_width(height, width, scale_factor=8):
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        new_height += 1
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor
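# Worked example (illustrative comment, not in the original file): with
# height=531, width=768 and the default scale_factor=8, scale_factor**2 is 64;
# 531 // 64 == 8 with a remainder, so new_height becomes 9, while 768 // 64 == 12
# exactly. The function returns (9 * 8, 12 * 8) == (72, 96), the latent height
# and width; multiplying by scale_factor again gives pixel sizes (576, 768),
# i.e. the requested size rounded up to a multiple of 64.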
class __A( UpperCAmelCase ):
def __init__( self : Optional[Any] , __UpperCamelCase : UNetaDConditionModel , __UpperCamelCase : DDPMScheduler , __UpperCamelCase : VQModel , ):
super().__init__()
self.register_modules(
unet=__UpperCamelCase , scheduler=__UpperCamelCase , movq=__UpperCamelCase , )
lowerCamelCase_ = 2 ** (len(self.movq.config.block_out_channels ) - 1)
def lowercase__ ( self : Optional[Any] , __UpperCamelCase : str , __UpperCamelCase : Optional[Any] , __UpperCamelCase : str , __UpperCamelCase : str , __UpperCamelCase : Dict , __UpperCamelCase : int ):
if latents is None:
lowerCamelCase_ = randn_tensor(__UpperCamelCase , generator=__UpperCamelCase , device=__UpperCamelCase , dtype=__UpperCamelCase )
else:
if latents.shape != shape:
raise ValueError(F'''Unexpected latents shape, got {latents.shape}, expected {shape}''' )
lowerCamelCase_ = latents.to(__UpperCamelCase )
lowerCamelCase_ = latents * scheduler.init_noise_sigma
return latents
def lowercase__ ( self : Optional[int] , __UpperCamelCase : List[str]=0 ):
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError("""Please install accelerate via `pip install accelerate`""" )
lowerCamelCase_ = torch.device(F'''cuda:{gpu_id}''' )
lowerCamelCase_ = [
self.unet,
self.movq,
]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(__UpperCamelCase , __UpperCamelCase )
def lowercase__ ( self : Tuple , __UpperCamelCase : Any=0 ):
if is_accelerate_available() and is_accelerate_version(""">=""" , """0.17.0.dev0""" ):
from accelerate import cpu_offload_with_hook
else:
raise ImportError("""`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.""" )
lowerCamelCase_ = torch.device(F'''cuda:{gpu_id}''' )
if self.device.type != "cpu":
self.to("""cpu""" , silence_dtype_warnings=__UpperCamelCase )
torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
lowerCamelCase_ = None
for cpu_offloaded_model in [self.unet, self.movq]:
lowerCamelCase_ , lowerCamelCase_ = cpu_offload_with_hook(__UpperCamelCase , __UpperCamelCase , prev_module_hook=__UpperCamelCase )
# We'll offload the last model manually.
lowerCamelCase_ = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def lowercase__ ( self : Optional[Any] ):
if not hasattr(self.unet , """_hf_hook""" ):
return self.device
for module in self.unet.modules():
if (
hasattr(__UpperCamelCase , """_hf_hook""" )
and hasattr(module._hf_hook , """execution_device""" )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
@replace_example_docstring(__UpperCamelCase )
def __call__( self : List[str] , __UpperCamelCase : Union[torch.FloatTensor, List[torch.FloatTensor]] , __UpperCamelCase : Union[torch.FloatTensor, List[torch.FloatTensor]] , __UpperCamelCase : torch.FloatTensor , __UpperCamelCase : int = 5_1_2 , __UpperCamelCase : int = 5_1_2 , __UpperCamelCase : int = 1_0_0 , __UpperCamelCase : float = 4.0 , __UpperCamelCase : int = 1 , __UpperCamelCase : Optional[Union[torch.Generator, List[torch.Generator]]] = None , __UpperCamelCase : Optional[torch.FloatTensor] = None , __UpperCamelCase : Optional[str] = "pil" , __UpperCamelCase : bool = True , ):
lowerCamelCase_ = self._execution_device
lowerCamelCase_ = guidance_scale > 1.0
if isinstance(__UpperCamelCase , __UpperCamelCase ):
lowerCamelCase_ = torch.cat(__UpperCamelCase , dim=0 )
if isinstance(__UpperCamelCase , __UpperCamelCase ):
lowerCamelCase_ = torch.cat(__UpperCamelCase , dim=0 )
if isinstance(__UpperCamelCase , __UpperCamelCase ):
lowerCamelCase_ = torch.cat(__UpperCamelCase , dim=0 )
lowerCamelCase_ = image_embeds.shape[0] * num_images_per_prompt
if do_classifier_free_guidance:
lowerCamelCase_ = image_embeds.repeat_interleave(__UpperCamelCase , dim=0 )
lowerCamelCase_ = negative_image_embeds.repeat_interleave(__UpperCamelCase , dim=0 )
lowerCamelCase_ = hint.repeat_interleave(__UpperCamelCase , dim=0 )
lowerCamelCase_ = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(dtype=self.unet.dtype , device=__UpperCamelCase )
lowerCamelCase_ = torch.cat([hint, hint] , dim=0 ).to(dtype=self.unet.dtype , device=__UpperCamelCase )
self.scheduler.set_timesteps(__UpperCamelCase , device=__UpperCamelCase )
lowerCamelCase_ = self.scheduler.timesteps
lowerCamelCase_ = self.movq.config.latent_channels
lowerCamelCase_ , lowerCamelCase_ = downscale_height_and_width(__UpperCamelCase , __UpperCamelCase , self.movq_scale_factor )
# create initial latent
lowerCamelCase_ = self.prepare_latents(
(batch_size, num_channels_latents, height, width) , image_embeds.dtype , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , self.scheduler , )
for i, t in enumerate(self.progress_bar(__UpperCamelCase ) ):
# expand the latents if we are doing classifier free guidance
lowerCamelCase_ = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
lowerCamelCase_ = {"""image_embeds""": image_embeds, """hint""": hint}
lowerCamelCase_ = self.unet(
sample=__UpperCamelCase , timestep=__UpperCamelCase , encoder_hidden_states=__UpperCamelCase , added_cond_kwargs=__UpperCamelCase , return_dict=__UpperCamelCase , )[0]
if do_classifier_free_guidance:
lowerCamelCase_ , lowerCamelCase_ = noise_pred.split(latents.shape[1] , dim=1 )
lowerCamelCase_ , lowerCamelCase_ = noise_pred.chunk(2 )
lowerCamelCase_ , lowerCamelCase_ = variance_pred.chunk(2 )
lowerCamelCase_ = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
lowerCamelCase_ = torch.cat([noise_pred, variance_pred_text] , dim=1 )
if not (
hasattr(self.scheduler.config , """variance_type""" )
and self.scheduler.config.variance_type in ["learned", "learned_range"]
):
lowerCamelCase_ , lowerCamelCase_ = noise_pred.split(latents.shape[1] , dim=1 )
# compute the previous noisy sample x_t -> x_t-1
lowerCamelCase_ = self.scheduler.step(
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , generator=__UpperCamelCase , )[0]
# post-processing
lowerCamelCase_ = self.movq.decode(__UpperCamelCase , force_not_quantize=__UpperCamelCase )["""sample"""]
if output_type not in ["pt", "np", "pil"]:
raise ValueError(F'''Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}''' )
if output_type in ["np", "pil"]:
lowerCamelCase_ = image * 0.5 + 0.5
lowerCamelCase_ = image.clamp(0 , 1 )
lowerCamelCase_ = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
lowerCamelCase_ = self.numpy_to_pil(__UpperCamelCase )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=__UpperCamelCase )
| 272
|
from __future__ import annotations
import unittest
from transformers import DebertaVaConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFDebertaVaForMaskedLM,
TFDebertaVaForQuestionAnswering,
TFDebertaVaForSequenceClassification,
TFDebertaVaForTokenClassification,
TFDebertaVaModel,
)
class __A:
def __init__( self : List[Any] , __UpperCamelCase : Tuple , __UpperCamelCase : Any=1_3 , __UpperCamelCase : Optional[int]=7 , __UpperCamelCase : Any=True , __UpperCamelCase : Dict=True , __UpperCamelCase : Optional[Any]=True , __UpperCamelCase : Dict=True , __UpperCamelCase : Union[str, Any]=9_9 , __UpperCamelCase : List[str]=3_2 , __UpperCamelCase : int=2 , __UpperCamelCase : Optional[Any]=4 , __UpperCamelCase : Optional[int]=3_7 , __UpperCamelCase : Optional[Any]="gelu" , __UpperCamelCase : int=0.1 , __UpperCamelCase : str=0.1 , __UpperCamelCase : Tuple=5_1_2 , __UpperCamelCase : Any=1_6 , __UpperCamelCase : Dict=2 , __UpperCamelCase : Optional[Any]=0.02 , __UpperCamelCase : Optional[Any]=False , __UpperCamelCase : Dict=True , __UpperCamelCase : Any="None" , __UpperCamelCase : List[Any]=3 , __UpperCamelCase : Optional[int]=4 , __UpperCamelCase : Dict=None , ):
lowerCamelCase_ = parent
lowerCamelCase_ = batch_size
lowerCamelCase_ = seq_length
lowerCamelCase_ = is_training
lowerCamelCase_ = use_input_mask
lowerCamelCase_ = use_token_type_ids
lowerCamelCase_ = use_labels
lowerCamelCase_ = vocab_size
lowerCamelCase_ = hidden_size
lowerCamelCase_ = num_hidden_layers
lowerCamelCase_ = num_attention_heads
lowerCamelCase_ = intermediate_size
lowerCamelCase_ = hidden_act
lowerCamelCase_ = hidden_dropout_prob
lowerCamelCase_ = attention_probs_dropout_prob
lowerCamelCase_ = max_position_embeddings
lowerCamelCase_ = type_vocab_size
lowerCamelCase_ = type_sequence_label_size
lowerCamelCase_ = initializer_range
lowerCamelCase_ = num_labels
lowerCamelCase_ = num_choices
lowerCamelCase_ = relative_attention
lowerCamelCase_ = position_biased_input
lowerCamelCase_ = pos_att_type
lowerCamelCase_ = scope
def lowercase__ ( self : Optional[Any] ):
lowerCamelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCamelCase_ = None
if self.use_input_mask:
lowerCamelCase_ = random_attention_mask([self.batch_size, self.seq_length] )
lowerCamelCase_ = None
if self.use_token_type_ids:
lowerCamelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
lowerCamelCase_ = None
lowerCamelCase_ = None
lowerCamelCase_ = None
if self.use_labels:
lowerCamelCase_ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCamelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowerCamelCase_ = DebertaVaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , relative_attention=self.relative_attention , position_biased_input=self.position_biased_input , initializer_range=self.initializer_range , return_dict=__UpperCamelCase , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def lowercase__ ( self : str , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : Optional[int] , __UpperCamelCase : Optional[Any] , __UpperCamelCase : Optional[int] , __UpperCamelCase : Any , __UpperCamelCase : int , __UpperCamelCase : List[str] ):
lowerCamelCase_ = TFDebertaVaModel(config=__UpperCamelCase )
lowerCamelCase_ = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
lowerCamelCase_ = [input_ids, input_mask]
lowerCamelCase_ = model(__UpperCamelCase )
lowerCamelCase_ = model(__UpperCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowercase__ ( self : Optional[int] , __UpperCamelCase : List[str] , __UpperCamelCase : str , __UpperCamelCase : Dict , __UpperCamelCase : str , __UpperCamelCase : List[str] , __UpperCamelCase : Tuple , __UpperCamelCase : int ):
lowerCamelCase_ = TFDebertaVaForMaskedLM(config=__UpperCamelCase )
lowerCamelCase_ = {
"""input_ids""": input_ids,
"""attention_mask""": input_mask,
"""token_type_ids""": token_type_ids,
}
lowerCamelCase_ = model(__UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowercase__ ( self : List[Any] , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : str , __UpperCamelCase : int , __UpperCamelCase : Optional[int] , __UpperCamelCase : List[Any] , __UpperCamelCase : str , __UpperCamelCase : List[Any] ):
lowerCamelCase_ = self.num_labels
lowerCamelCase_ = TFDebertaVaForSequenceClassification(config=__UpperCamelCase )
lowerCamelCase_ = {
"""input_ids""": input_ids,
"""attention_mask""": input_mask,
"""token_type_ids""": token_type_ids,
}
lowerCamelCase_ = model(__UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowercase__ ( self : int , __UpperCamelCase : Tuple , __UpperCamelCase : Any , __UpperCamelCase : str , __UpperCamelCase : Optional[int] , __UpperCamelCase : List[str] , __UpperCamelCase : List[Any] , __UpperCamelCase : List[Any] ):
lowerCamelCase_ = self.num_labels
lowerCamelCase_ = TFDebertaVaForTokenClassification(config=__UpperCamelCase )
lowerCamelCase_ = {
"""input_ids""": input_ids,
"""attention_mask""": input_mask,
"""token_type_ids""": token_type_ids,
}
lowerCamelCase_ = model(__UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def lowercase__ ( self : List[Any] , __UpperCamelCase : Optional[int] , __UpperCamelCase : Optional[Any] , __UpperCamelCase : Optional[int] , __UpperCamelCase : str , __UpperCamelCase : Optional[Any] , __UpperCamelCase : List[str] , __UpperCamelCase : int ):
lowerCamelCase_ = TFDebertaVaForQuestionAnswering(config=__UpperCamelCase )
lowerCamelCase_ = {
"""input_ids""": input_ids,
"""attention_mask""": input_mask,
"""token_type_ids""": token_type_ids,
}
lowerCamelCase_ = model(__UpperCamelCase )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def lowercase__ ( self : Dict ):
lowerCamelCase_ = self.prepare_config_and_inputs()
(
(
lowerCamelCase_
) , (
lowerCamelCase_
) , (
lowerCamelCase_
) , (
lowerCamelCase_
) , (
lowerCamelCase_
) , (
lowerCamelCase_
) , (
lowerCamelCase_
) ,
) = config_and_inputs
lowerCamelCase_ = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_tf
class __A( UpperCAmelCase , UpperCAmelCase , unittest.TestCase ):
SCREAMING_SNAKE_CASE = (
(
TFDebertaVaModel,
TFDebertaVaForMaskedLM,
TFDebertaVaForQuestionAnswering,
TFDebertaVaForSequenceClassification,
TFDebertaVaForTokenClassification,
)
if is_tf_available()
else ()
)
SCREAMING_SNAKE_CASE = (
{
'''feature-extraction''': TFDebertaVaModel,
'''fill-mask''': TFDebertaVaForMaskedLM,
'''question-answering''': TFDebertaVaForQuestionAnswering,
'''text-classification''': TFDebertaVaForSequenceClassification,
'''token-classification''': TFDebertaVaForTokenClassification,
'''zero-shot''': TFDebertaVaForSequenceClassification,
}
if is_tf_available()
else {}
)
SCREAMING_SNAKE_CASE = False
SCREAMING_SNAKE_CASE = False
def lowercase__ ( self : Dict ):
lowerCamelCase_ = TFDebertaVaModelTester(self )
lowerCamelCase_ = ConfigTester(self , config_class=__UpperCamelCase , hidden_size=3_7 )
def lowercase__ ( self : Dict ):
self.config_tester.run_common_tests()
def lowercase__ ( self : Optional[Any] ):
lowerCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__UpperCamelCase )
def lowercase__ ( self : Dict ):
lowerCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*__UpperCamelCase )
def lowercase__ ( self : List[Any] ):
lowerCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*__UpperCamelCase )
def lowercase__ ( self : Tuple ):
lowerCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*__UpperCamelCase )
def lowercase__ ( self : Any ):
lowerCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*__UpperCamelCase )
@slow
def lowercase__ ( self : int ):
lowerCamelCase_ = TFDebertaVaModel.from_pretrained("""kamalkraj/deberta-v2-xlarge""" )
self.assertIsNotNone(__UpperCamelCase )
@require_tf
class __A( unittest.TestCase ):
@unittest.skip(reason="""Model not available yet""" )
def lowercase__ ( self : Optional[int] ):
pass
@slow
def lowercase__ ( self : Any ):
lowerCamelCase_ = TFDebertaVaModel.from_pretrained("""kamalkraj/deberta-v2-xlarge""" )
lowerCamelCase_ = tf.constant([[0, 3_1_4_1_4, 2_3_2, 3_2_8, 7_4_0, 1_1_4_0, 1_2_6_9_5, 6_9, 4_6_0_7_8, 1_5_8_8, 2]] )
lowerCamelCase_ = tf.constant([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
lowerCamelCase_ = model(__UpperCamelCase , attention_mask=__UpperCamelCase )[0]
lowerCamelCase_ = tf.constant(
[[[0.2356, 0.1948, 0.0369], [-0.1063, 0.3586, -0.5152], [-0.6399, -0.0259, -0.2525]]] )
tf.debugging.assert_near(output[:, 1:4, 1:4] , __UpperCamelCase , atol=1E-4 )
| 272
| 1
|
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
import diffusers
from diffusers import (
AutoencoderKL,
EulerDiscreteScheduler,
StableDiffusionLatentUpscalePipeline,
StableDiffusionPipeline,
UNetaDConditionModel,
)
from diffusers.schedulers import KarrasDiffusionSchedulers
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
def check_same_shape(tensor_list):
    shapes = [tensor.shape for tensor in tensor_list]
    return all(shape == shapes[0] for shape in shapes[1:])
class _lowerCAmelCase ( __a , __a , __a , unittest.TestCase ):
_lowercase =StableDiffusionLatentUpscalePipeline
_lowercase =TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {
'''height''',
'''width''',
'''cross_attention_kwargs''',
'''negative_prompt_embeds''',
'''prompt_embeds''',
}
_lowercase =PipelineTesterMixin.required_optional_params - {'''num_images_per_prompt'''}
_lowercase =TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
_lowercase =frozenset(
[] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
_lowercase =frozenset([] )
_lowercase =True
@property
def __a ( self ) -> int:
lowerCAmelCase_ = 1
lowerCAmelCase_ = 4
lowerCAmelCase_ = (16, 16)
lowerCAmelCase_ = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(_UpperCamelCase )
return image
def __a ( self ) -> Union[str, Any]:
torch.manual_seed(0 )
lowerCAmelCase_ = UNetaDConditionModel(
act_fn="gelu" , attention_head_dim=8 , norm_num_groups=_UpperCamelCase , block_out_channels=[32, 32, 64, 64] , time_cond_proj_dim=160 , conv_in_kernel=1 , conv_out_kernel=1 , cross_attention_dim=32 , down_block_types=(
"KDownBlock2D",
"KCrossAttnDownBlock2D",
"KCrossAttnDownBlock2D",
"KCrossAttnDownBlock2D",
) , in_channels=8 , mid_block_type=_UpperCamelCase , only_cross_attention=_UpperCamelCase , out_channels=5 , resnet_time_scale_shift="scale_shift" , time_embedding_type="fourier" , timestep_post_act="gelu" , up_block_types=("KCrossAttnUpBlock2D", "KCrossAttnUpBlock2D", "KCrossAttnUpBlock2D", "KUpBlock2D") , )
lowerCAmelCase_ = AutoencoderKL(
block_out_channels=[32, 32, 64, 64] , in_channels=3 , out_channels=3 , down_block_types=[
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"DownEncoderBlock2D",
] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , )
lowerCAmelCase_ = EulerDiscreteScheduler(prediction_type="sample" )
lowerCAmelCase_ = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , hidden_act="quick_gelu" , projection_dim=512 , )
lowerCAmelCase_ = CLIPTextModel(_UpperCamelCase )
lowerCAmelCase_ = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
lowerCAmelCase_ = {
"unet": model.eval(),
"vae": vae.eval(),
"scheduler": scheduler,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
}
return components
def __a ( self , _UpperCamelCase , _UpperCamelCase=0 ) -> Dict:
if str(_UpperCamelCase ).startswith("mps" ):
lowerCAmelCase_ = torch.manual_seed(_UpperCamelCase )
else:
lowerCAmelCase_ = torch.Generator(device=_UpperCamelCase ).manual_seed(_UpperCamelCase )
lowerCAmelCase_ = {
"prompt": "A painting of a squirrel eating a burger",
"image": self.dummy_image.cpu(),
"generator": generator,
"num_inference_steps": 2,
"output_type": "numpy",
}
return inputs
def __a ( self ) -> Tuple:
lowerCAmelCase_ = "cpu"
lowerCAmelCase_ = self.get_dummy_components()
lowerCAmelCase_ = self.pipeline_class(**_UpperCamelCase )
pipe.to(_UpperCamelCase )
pipe.set_progress_bar_config(disable=_UpperCamelCase )
lowerCAmelCase_ = self.get_dummy_inputs(_UpperCamelCase )
lowerCAmelCase_ = pipe(**_UpperCamelCase ).images
lowerCAmelCase_ = image[0, -3:, -3:, -1]
self.assertEqual(image.shape , (1, 256, 256, 3) )
lowerCAmelCase_ = np.array(
[0.47222412, 0.41921633, 0.44717434, 0.46874192, 0.42588258, 0.46150726, 0.4677534, 0.45583832, 0.48579055] )
lowerCAmelCase_ = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(_UpperCamelCase , 1e-3 )
def __a ( self ) -> Optional[Any]:
super().test_attention_slicing_forward_pass(expected_max_diff=7e-3 )
def __a ( self ) -> Union[str, Any]:
super().test_cpu_offload_forward_pass(expected_max_diff=3e-3 )
def __a ( self ) -> Tuple:
super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3 )
def __a ( self ) -> str:
super().test_inference_batch_single_identical(expected_max_diff=7e-3 )
def __a ( self ) -> str:
super().test_pt_np_pil_outputs_equivalent(expected_max_diff=3e-3 )
def __a ( self ) -> Union[str, Any]:
super().test_save_load_local(expected_max_difference=3e-3 )
def __a ( self ) -> str:
super().test_save_load_optional_components(expected_max_difference=3e-3 )
def __a ( self ) -> str:
lowerCAmelCase_ = [
"DDIMScheduler",
"DDPMScheduler",
"PNDMScheduler",
"HeunDiscreteScheduler",
"EulerAncestralDiscreteScheduler",
"KDPM2DiscreteScheduler",
"KDPM2AncestralDiscreteScheduler",
"DPMSolverSDEScheduler",
]
lowerCAmelCase_ = self.get_dummy_components()
lowerCAmelCase_ = self.pipeline_class(**_UpperCamelCase )
# make sure that PNDM does not need warm-up
pipe.scheduler.register_to_config(skip_prk_steps=_UpperCamelCase )
pipe.to(_UpperCamelCase )
pipe.set_progress_bar_config(disable=_UpperCamelCase )
lowerCAmelCase_ = self.get_dummy_inputs(_UpperCamelCase )
lowerCAmelCase_ = 2
lowerCAmelCase_ = []
for scheduler_enum in KarrasDiffusionSchedulers:
if scheduler_enum.name in skip_schedulers:
                # schedulers without a sigma schedule are not supported here
continue
lowerCAmelCase_ = getattr(_UpperCamelCase , scheduler_enum.name )
lowerCAmelCase_ = scheduler_cls.from_config(pipe.scheduler.config )
lowerCAmelCase_ = pipe(**_UpperCamelCase )[0]
outputs.append(_UpperCamelCase )
assert check_same_shape(_UpperCamelCase )
@require_torch_gpu
@slow
class _lowerCAmelCase ( unittest.TestCase ):
def __a ( self ) -> Optional[int]:
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __a ( self ) -> Tuple:
lowerCAmelCase_ = torch.manual_seed(33 )
lowerCAmelCase_ = StableDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4" , torch_dtype=torch.floataa )
pipe.to("cuda" )
lowerCAmelCase_ = StableDiffusionLatentUpscalePipeline.from_pretrained(
"stabilityai/sd-x2-latent-upscaler" , torch_dtype=torch.floataa )
upscaler.to("cuda" )
lowerCAmelCase_ = "a photo of an astronaut high resolution, unreal engine, ultra realistic"
lowerCAmelCase_ = pipe(_UpperCamelCase , generator=_UpperCamelCase , output_type="latent" ).images
lowerCAmelCase_ = upscaler(
prompt=_UpperCamelCase , image=_UpperCamelCase , num_inference_steps=20 , guidance_scale=0 , generator=_UpperCamelCase , output_type="np" , ).images[0]
lowerCAmelCase_ = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/astronaut_1024.npy" )
assert np.abs((expected_image - image).mean() ) < 5e-2
def __a ( self ) -> Union[str, Any]:
lowerCAmelCase_ = torch.manual_seed(33 )
lowerCAmelCase_ = StableDiffusionLatentUpscalePipeline.from_pretrained(
"stabilityai/sd-x2-latent-upscaler" , torch_dtype=torch.floataa )
upscaler.to("cuda" )
lowerCAmelCase_ = "the temple of fire by Ross Tran and Gerardo Dottori, oil on canvas"
lowerCAmelCase_ = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/fire_temple_512.png" )
lowerCAmelCase_ = upscaler(
prompt=_UpperCamelCase , image=_UpperCamelCase , num_inference_steps=20 , guidance_scale=0 , generator=_UpperCamelCase , output_type="np" , ).images[0]
lowerCAmelCase_ = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/fire_temple_1024.npy" )
assert np.abs((expected_image - image).max() ) < 5e-2
| 706
|
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
_A = "▁"
_A = {"vocab_file": "spiece.model"}
_A = {
"vocab_file": {"google/pegasus-xsum": "https://huggingface.co/google/pegasus-xsum/resolve/main/spiece.model"}
}
_A = {
"google/pegasus-xsum": 5_12,
}
_A = logging.get_logger(__name__)
class PegasusTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
def __init__( self , _UpperCamelCase , _UpperCamelCase="<pad>" , _UpperCamelCase="</s>" , _UpperCamelCase="<unk>" , _UpperCamelCase="<mask_2>" , _UpperCamelCase="<mask_1>" , _UpperCamelCase=None , _UpperCamelCase=103 , _UpperCamelCase = None , **_UpperCamelCase , ) -> None:
lowerCAmelCase_ = offset
if additional_special_tokens is not None:
if not isinstance(_UpperCamelCase , _UpperCamelCase ):
raise TypeError(
f"""additional_special_tokens should be of type {type(_UpperCamelCase )}, but is"""
f""" {type(_UpperCamelCase )}""" )
lowerCAmelCase_ = (
([mask_token_sent] + additional_special_tokens)
if mask_token_sent not in additional_special_tokens and mask_token_sent is not None
else additional_special_tokens
)
# fill additional tokens with ..., <unk_token_102> in case not all additional tokens are already taken
additional_special_tokens_extended += [
f"""<unk_{i}>""" for i in range(len(_UpperCamelCase ) , self.offset - 1 )
]
if len(set(_UpperCamelCase ) ) != len(_UpperCamelCase ):
raise ValueError(
"Please make sure that the provided additional_special_tokens do not contain an incorrectly"
f""" shifted list of <unk_x> tokens. Found {additional_special_tokens_extended}.""" )
lowerCAmelCase_ = additional_special_tokens_extended
else:
lowerCAmelCase_ = [mask_token_sent] if mask_token_sent is not None else []
additional_special_tokens += [f"""<unk_{i}>""" for i in range(2 , self.offset )]
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            eos_token=eos_token , unk_token=unk_token , mask_token=mask_token , pad_token=pad_token , mask_token_sent=mask_token_sent , offset=offset , additional_special_tokens=additional_special_tokens , sp_model_kwargs=self.sp_model_kwargs , **kwargs , )
        self.mask_token_sent = mask_token_sent
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(vocab_file )
# add special tokens to encoder dict
        self.encoder = {
0: self.pad_token,
1: self.eos_token,
}
if self.mask_token_sent is not None:
self.encoder.update(
{
2: self.mask_token_sent,
3: self.mask_token,
} )
if self.offset > 0:
# entries 2-104 are only used for pretraining and called <mask_1>, <mask_2>, unk_2, ...unk_102
# mask_token_sent is already added to list -> so start at 1
self.encoder.update({i + 3: additional_special_tokens[i] for i in range(1 , self.offset - 1 )} )
        self.decoder = {v: k for k, v in self.encoder.items()}
@property
    def vocab_size( self ) -> int:
return len(self.sp_model ) + self.offset
    def get_vocab( self ) -> Dict[str, int]:
        vocab = {self.convert_ids_to_tokens(i ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self ) -> List[Any]:
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state
    def __setstate__( self , d ) -> None:
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self , "sp_model_kwargs" ):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(self.vocab_file )
    def _tokenize( self , text ) -> List[str]:
        return self.sp_model.encode(text , out_type=str )
    def _convert_token_to_id( self , token ) -> int:
        if token in self.decoder:
            return self.decoder[token]
        elif token in self.added_tokens_decoder:
            return self.added_tokens_decoder[token]
        sp_id = self.sp_model.piece_to_id(token )
        return sp_id + self.offset
    def _convert_id_to_token( self , index ) -> str:
        if index in self.encoder:
            return self.encoder[index]
        elif index in self.added_tokens_encoder:
            return self.added_tokens_encoder[index]
        else:
            token = self.sp_model.IdToPiece(index - self.offset )
            return token
    def convert_tokens_to_string( self , tokens ) -> str:
        current_sub_tokens = []
        out_string = ""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens ) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token )
        out_string += self.sp_model.decode(current_sub_tokens )
        return out_string.strip()
    def num_special_tokens_to_add( self , pair=False ) -> int:
        return 1
    def _special_token_mask( self , seq ) -> List[int]:
        all_special_ids = set(self.all_special_ids )  # call it once instead of inside list comp
        all_special_ids.remove(self.unk_token_id )  # <unk> is only sometimes special
        return [1 if x in all_special_ids else 0 for x in seq]
    def get_special_tokens_mask( self , token_ids_a , token_ids_b = None , already_has_special_tokens = False ) -> List[int]:
        if already_has_special_tokens:
            return self._special_token_mask(token_ids_a )
        elif token_ids_b is None:
            return self._special_token_mask(token_ids_a ) + [1]
        else:
            return self._special_token_mask(token_ids_a + token_ids_b ) + [1]
    def build_inputs_with_special_tokens( self , token_ids_a , token_ids_b=None ) -> List[int]:
        if token_ids_b is None:
            return token_ids_a + [self.eos_token_id]
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return token_ids_a + token_ids_b + [self.eos_token_id]
    def save_vocabulary( self , save_directory , filename_prefix = None ) -> Tuple[str]:
        if not os.path.isdir(save_directory ):
            logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ) and os.path.isfile(self.vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
        elif not os.path.isfile(self.vocab_file ):
            with open(out_vocab_file , "wb" ) as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model )
return (out_vocab_file,)
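# --- Added usage sketch (not part of the original file). The checkpoint name
# matches PRETRAINED_VOCAB_FILES_MAP above; network access is assumed.
if __name__ == "__main__":
    tokenizer = PegasusTokenizer.from_pretrained("google/pegasus-xsum")
    ids = tokenizer("PEGASUS is pre-trained with gap sentences.")["input_ids"]
    print(ids[-1] == tokenizer.eos_token_id)  # True: eos is appended by build_inputs_with_special_tokens
    print(tokenizer.convert_ids_to_tokens(ids))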
| 279
| 0
|
'''simple docstring'''
import operator as op
def solve (post_fix ) -> int:
    stack = []
    div = lambda x , y : int(x / y )  # noqa: E731 integer division operation
    opr = {
        "^": op.pow,
        "*": op.mul,
        "/": div,
        "+": op.add,
        "-": op.sub,
    }  # operators & their respective operation
    # print table header
    print("Symbol".center(8 ) , "Action".center(12 ) , "Stack" , sep=" | " )
    print("-" * (30 + len(post_fix )) )
    for x in post_fix:
        if x.isdigit():  # if x is a digit
            stack.append(x )  # push x to stack
            # output in tabular format
            print(x.rjust(8 ) , ("push(" + x + ")").ljust(12 ) , ",".join(stack ) , sep=" | " )
        else:
            b = stack.pop()  # pop stack
            # output in tabular format
            print("".rjust(8 ) , ("pop(" + b + ")").ljust(12 ) , ",".join(stack ) , sep=" | " )
            a = stack.pop()  # pop stack
            # output in tabular format
            print("".rjust(8 ) , ("pop(" + a + ")").ljust(12 ) , ",".join(stack ) , sep=" | " )
            stack.append(
                str(opr[x](int(a ) , int(b ) ) ) )  # evaluate the 2 values popped from stack & push result to stack
            # output in tabular format
            print(
                x.rjust(8 ) , ("push(" + a + x + b + ")").ljust(12 ) , ",".join(stack ) , sep=" | " , )
    return int(stack[0] )


if __name__ == "__main__":
    Postfix = input('\n\nEnter a Postfix Equation (space separated) = ').split(' ')
    print('\n\tResult = ', solve(Postfix))
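# Worked example (added, not in the original file): the postfix expression
# "2 3 + 4 *" encodes (2 + 3) * 4; the stack pushes 2 and 3, pops them for
# "+", pushes 5, pushes 4, then pops 5 and 4 for "*":
# >>> solve("2 3 + 4 *".split())
# 20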
| 418
|
from argparse import ArgumentParser
from ..pipelines import Pipeline, PipelineDataFormat, get_supported_tasks, pipeline
from ..utils import logging
from . import BaseTransformersCLICommand
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
def try_infer_format_from_ext (path ) -> str:
    '''simple docstring'''
    if not path:
        return "pipe"
    for ext in PipelineDataFormat.SUPPORTED_FORMATS:
        if path.endswith(ext ):
            return ext
raise Exception(
F"""Unable to determine file format from file extension {path}. """
F"""Please provide the format through --format {PipelineDataFormat.SUPPORTED_FORMATS}""" )
def run_command_factory (args ):
    '''simple docstring'''
    nlp : Pipeline = pipeline(
        task=args.task , model=args.model if args.model else None , config=args.config , tokenizer=args.tokenizer , device=args.device , )
    format : str = try_infer_format_from_ext(args.input ) if args.format == "infer" else args.format
    reader : PipelineDataFormat = PipelineDataFormat.from_str(
        format=format , output_path=args.output , input_path=args.input , column=args.column if args.column else nlp.default_input_names , overwrite=args.overwrite , )
    return RunCommand(nlp , reader )
class RunCommand ( BaseTransformersCLICommand ):
    def __init__( self , nlp , reader ):
        self._nlp = nlp
        self._reader = reader
@staticmethod
    def register_subcommand ( parser ):
        run_parser = parser.add_parser("run" , help="Run a pipeline through the CLI" )
run_parser.add_argument("--task" , choices=get_supported_tasks() , help="Task to run" )
run_parser.add_argument("--input" , type=lowercase_ , help="Path to the file to use for inference" )
run_parser.add_argument("--output" , type=lowercase_ , help="Path to the file that will be used post to write results." )
run_parser.add_argument("--model" , type=lowercase_ , help="Name or path to the model to instantiate." )
run_parser.add_argument("--config" , type=lowercase_ , help="Name or path to the model's config to instantiate." )
run_parser.add_argument(
"--tokenizer" , type=lowercase_ , help="Name of the tokenizer to use. (default: same as the model name)" )
run_parser.add_argument(
"--column" , type=lowercase_ , help="Name of the column to use as input. (For multi columns input as QA use column1,columns2)" , )
run_parser.add_argument(
"--format" , type=lowercase_ , default="infer" , choices=PipelineDataFormat.SUPPORTED_FORMATS , help="Input format to read from" , )
run_parser.add_argument(
"--device" , type=lowercase_ , default=-1 , help="Indicate the device to run onto, -1 indicates CPU, >= 0 indicates GPU (default: -1)" , )
run_parser.add_argument("--overwrite" , action="store_true" , help="Allow overwriting the output file." )
        run_parser.set_defaults(func=run_command_factory )
    def run ( self ):
        nlp, outputs = self._nlp, []
        for entry in self._reader:
            output = nlp(**entry ) if self._reader.is_multi_columns else nlp(entry )
            if isinstance(output , dict ):
                outputs.append(output )
            else:
                outputs += output
        # Saving data
        if self._nlp.binary_output:
            binary_path = self._reader.save_binary(outputs )
            logger.warning(f"""Current pipeline requires output to be in binary format, saving at {binary_path}""" )
        else:
            self._reader.save(outputs )
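# --- Added example invocation (not part of the original file); the task and
# file/column names are assumptions. `run` reads one record per row from the
# input, feeds it through the pipeline, and writes the predictions back out:
#
#   transformers-cli run --task text-classification \
#       --input reviews.csv --column review_text --format csv \
#       --output predictions.json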
| 670
| 0
|
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OpenAIGPTConfig,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTForSequenceClassification,
OpenAIGPTLMHeadModel,
OpenAIGPTModel,
)
class OpenAIGPTModelTester :
    def __init__( self ,parent ,batch_size=13 ,seq_length=7 ,is_training=True ,use_token_type_ids=True ,use_labels=True ,vocab_size=99 ,hidden_size=32 ,num_hidden_layers=5 ,num_attention_heads=4 ,intermediate_size=37 ,hidden_act="gelu" ,hidden_dropout_prob=0.1 ,attention_probs_dropout_prob=0.1 ,max_position_embeddings=5_12 ,type_vocab_size=16 ,type_sequence_label_size=2 ,initializer_range=0.02 ,num_labels=3 ,num_choices=4 ,scope=None ,):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.pad_token_id = self.vocab_size - 1
    def prepare_config_and_inputs( self ):
        input_ids = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] ,self.type_vocab_size )
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels )
            choice_labels = ids_tensor([self.batch_size] ,self.num_choices )
        config = OpenAIGPTConfig(
            vocab_size=self.vocab_size ,n_embd=self.hidden_size ,n_layer=self.num_hidden_layers ,n_head=self.num_attention_heads ,n_positions=self.max_position_embeddings ,pad_token_id=self.pad_token_id ,)
        head_mask = ids_tensor([self.num_hidden_layers, self.num_attention_heads] ,2 )
return (
config,
input_ids,
head_mask,
token_type_ids,
sequence_labels,
token_labels,
choice_labels,
)
    def create_and_check_openai_gpt_model( self ,config ,input_ids ,head_mask ,token_type_ids ,*args ):
        model = OpenAIGPTModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids ,token_type_ids=token_type_ids ,head_mask=head_mask )
        result = model(input_ids ,token_type_ids=token_type_ids )
        result = model(input_ids )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
    def create_and_check_lm_head_model( self ,config ,input_ids ,head_mask ,token_type_ids ,*args ):
        model = OpenAIGPTLMHeadModel(config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids ,token_type_ids=token_type_ids ,labels=input_ids )
self.parent.assertEqual(result.loss.shape ,() )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
    def create_and_check_double_lm_head_model( self ,config ,input_ids ,head_mask ,token_type_ids ,*args ):
        model = OpenAIGPTDoubleHeadsModel(config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids ,token_type_ids=token_type_ids ,labels=input_ids )
self.parent.assertEqual(result.loss.shape ,() )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
    def create_and_check_openai_gpt_for_sequence_classification( self ,config ,input_ids ,head_mask ,token_type_ids ,*args ):
        config.num_labels = self.num_labels
        model = OpenAIGPTForSequenceClassification(config )
        model.to(torch_device )
        model.eval()
        sequence_labels = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
        result = model(input_ids ,token_type_ids=token_type_ids ,labels=sequence_labels )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) )
    def prepare_config_and_inputs_for_common( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            head_mask,
            token_type_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {
'input_ids': input_ids,
'token_type_ids': token_type_ids,
'head_mask': head_mask,
}
return config, inputs_dict
@require_torch
class OpenAIGPTModelTest (ModelTesterMixin , GenerationTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (
        (OpenAIGPTModel, OpenAIGPTLMHeadModel, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification)
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = (
        (OpenAIGPTLMHeadModel,) if is_torch_available() else ()
    )  # TODO (PVP): Add Double HeadsModel when generate() function is changed accordingly
    pipeline_model_mapping = (
{
'''feature-extraction''': OpenAIGPTModel,
'''text-classification''': OpenAIGPTForSequenceClassification,
'''text-generation''': OpenAIGPTLMHeadModel,
'''zero-shot''': OpenAIGPTForSequenceClassification,
}
if is_torch_available()
else {}
)
    def is_pipeline_test_to_skip( self ,pipeline_test_casse_name ,config_class ,model_architecture ,tokenizer_name ,processor_name ):
if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
# Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
# `OpenAIGPTConfig` was never used in pipeline tests, either because of a missing checkpoint or because a
# tiny config could not be created.
return True
return False
    def _prepare_for_class( self ,inputs_dict ,model_class ,return_labels=False ):
        inputs_dict = super()._prepare_for_class(inputs_dict ,model_class ,return_labels=return_labels )
        if return_labels:
            if model_class.__name__ == "OpenAIGPTDoubleHeadsModel":
                inputs_dict['labels'] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.num_choices, self.model_tester.seq_length) ,dtype=torch.long ,device=torch_device ,)
                inputs_dict['input_ids'] = inputs_dict['labels']
                inputs_dict['token_type_ids'] = inputs_dict['labels']
                inputs_dict['mc_token_ids'] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.num_choices) ,dtype=torch.long ,device=torch_device ,)
                inputs_dict['mc_labels'] = torch.zeros(
                    self.model_tester.batch_size ,dtype=torch.long ,device=torch_device )
        return inputs_dict
    def setUp( self ):
        self.model_tester = OpenAIGPTModelTester(self )
        self.config_tester = ConfigTester(self ,config_class=OpenAIGPTConfig ,n_embd=37 )
    def test_config( self ):
self.config_tester.run_common_tests()
    def test_openai_gpt_model( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_openai_gpt_model(*config_and_inputs )
    def test_openai_gpt_lm_head_model( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_lm_head_model(*config_and_inputs )
    def test_openai_gpt_double_lm_head_model( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_double_lm_head_model(*config_and_inputs )
    def test_openai_gpt_classification_model( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_openai_gpt_for_sequence_classification(*config_and_inputs )
@slow
    def test_model_from_pretrained( self ):
        for model_name in OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = OpenAIGPTModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
@require_torch
class OpenAIGPTModelLanguageGenerationTest (unittest.TestCase ):
@slow
    def test_lm_generate_openai_gpt( self ):
        model = OpenAIGPTLMHeadModel.from_pretrained('openai-gpt' )
        model.to(torch_device )
        input_ids = torch.tensor([[4_81, 47_35, 5_44]] ,dtype=torch.long ,device=torch_device )  # the president is
        expected_output_ids = [
4_81,
47_35,
5_44,
2_46,
9_63,
8_70,
7_62,
2_39,
2_44,
4_04_77,
2_44,
2_49,
7_19,
8_81,
4_87,
5_44,
2_40,
2_44,
6_03,
4_81,
] # the president is a very good man. " \n " i\'m sure he is, " said the
        output_ids = model.generate(input_ids ,do_sample=False )
        self.assertListEqual(output_ids[0].tolist() ,expected_output_ids )
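# --- Added sketch (not part of the original tests): the same greedy-decoding
# check as a standalone script; it needs network access to download 'openai-gpt'.
if __name__ == "__main__":
    import torch
    from transformers import OpenAIGPTLMHeadModel, OpenAIGPTTokenizer

    tokenizer = OpenAIGPTTokenizer.from_pretrained("openai-gpt")
    model = OpenAIGPTLMHeadModel.from_pretrained("openai-gpt")
    input_ids = torch.tensor([tokenizer.encode("the president is")])
    output_ids = model.generate(input_ids, do_sample=False)  # greedy, matches the expected ids above
    print(tokenizer.decode(output_ids[0]))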
| 657
|
import gc
import unittest
from transformers import CTRLConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
CTRLForSequenceClassification,
CTRLLMHeadModel,
CTRLModel,
)
class CTRLModelTester :
    def __init__( self ,parent ,batch_size=14 ,seq_length=7 ,is_training=True ,use_token_type_ids=True ,use_input_mask=True ,use_labels=True ,use_mc_token_ids=True ,vocab_size=99 ,hidden_size=32 ,num_hidden_layers=5 ,num_attention_heads=4 ,intermediate_size=37 ,hidden_act="gelu" ,hidden_dropout_prob=0.1 ,attention_probs_dropout_prob=0.1 ,max_position_embeddings=5_12 ,type_vocab_size=16 ,type_sequence_label_size=2 ,initializer_range=0.02 ,num_labels=3 ,num_choices=4 ,scope=None ,):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_token_type_ids = use_token_type_ids
        self.use_input_mask = use_input_mask
        self.use_labels = use_labels
        self.use_mc_token_ids = use_mc_token_ids
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.pad_token_id = self.vocab_size - 1
    def prepare_config_and_inputs( self ):
        input_ids = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] ,self.type_vocab_size )
        mc_token_ids = None
        if self.use_mc_token_ids:
            mc_token_ids = ids_tensor([self.batch_size, self.num_choices] ,self.seq_length )
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels )
            choice_labels = ids_tensor([self.batch_size] ,self.num_choices )
        config = self.get_config()
        head_mask = ids_tensor([self.num_hidden_layers, self.num_attention_heads] ,2 )
return (
config,
input_ids,
input_mask,
head_mask,
token_type_ids,
mc_token_ids,
sequence_labels,
token_labels,
choice_labels,
)
    def get_config( self ):
return CTRLConfig(
vocab_size=self.vocab_size ,n_embd=self.hidden_size ,n_layer=self.num_hidden_layers ,n_head=self.num_attention_heads ,n_positions=self.max_position_embeddings ,pad_token_id=self.pad_token_id ,)
    def create_and_check_ctrl_model( self ,config ,input_ids ,input_mask ,head_mask ,token_type_ids ,*args ):
        model = CTRLModel(config=config )
        model.to(torch_device )
        model.eval()
        model(input_ids ,token_type_ids=token_type_ids ,head_mask=head_mask )
        model(input_ids ,token_type_ids=token_type_ids )
        result = model(input_ids )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(len(result.past_key_values ) ,config.n_layer )
    def create_and_check_lm_head_model( self ,config ,input_ids ,input_mask ,head_mask ,token_type_ids ,*args ):
        model = CTRLLMHeadModel(config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids ,token_type_ids=token_type_ids ,labels=input_ids )
self.parent.assertEqual(result.loss.shape ,() )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
    def prepare_config_and_inputs_for_common( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            head_mask,
            token_type_ids,
            mc_token_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'head_mask': head_mask}
return config, inputs_dict
    def create_and_check_ctrl_for_sequence_classification( self ,config ,input_ids ,input_mask ,head_mask ,token_type_ids ,*args ):
        config.num_labels = self.num_labels
        model = CTRLForSequenceClassification(config )
        model.to(torch_device )
        model.eval()
        sequence_labels = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
        result = model(input_ids ,token_type_ids=token_type_ids ,labels=sequence_labels )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) )
@require_torch
class CTRLModelTest (ModelTesterMixin , GenerationTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (CTRLModel, CTRLLMHeadModel, CTRLForSequenceClassification) if is_torch_available() else ()
    all_generative_model_classes = (CTRLLMHeadModel,) if is_torch_available() else ()
    pipeline_model_mapping = (
{
'''feature-extraction''': CTRLModel,
'''text-classification''': CTRLForSequenceClassification,
'''text-generation''': CTRLLMHeadModel,
'''zero-shot''': CTRLForSequenceClassification,
}
if is_torch_available()
else {}
)
UpperCAmelCase__ : Tuple = True
UpperCAmelCase__ : List[Any] = False
UpperCAmelCase__ : List[str] = False
    def is_pipeline_test_to_skip( self ,pipeline_test_casse_name ,config_class ,model_architecture ,tokenizer_name ,processor_name ):
if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
# Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
# `CTRLConfig` was never used in pipeline tests, either because of a missing checkpoint or because a tiny
# config could not be created.
return True
return False
    def setUp( self ):
        self.model_tester = CTRLModelTester(self )
        self.config_tester = ConfigTester(self ,config_class=CTRLConfig ,n_embd=37 )
    def tearDown( self ):
super().tearDown()
# clean-up as much as possible GPU memory occupied by PyTorch
gc.collect()
torch.cuda.empty_cache()
    def test_config( self ):
self.config_tester.run_common_tests()
    def test_ctrl_model( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_ctrl_model(*config_and_inputs )
    def test_ctrl_lm_head_model( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_lm_head_model(*config_and_inputs )
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def lowerCamelCase__( self :Union[str, Any] ) -> Tuple:
pass
@slow
    def test_model_from_pretrained( self ):
        for model_name in CTRL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = CTRLModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
@unittest.skip('The model doesn\'t support left padding' ) # and it's not used enough to be worth fixing :)
def lowerCamelCase__( self :Dict ) -> List[str]:
pass
@require_torch
class CTRLModelLanguageGenerationTest (unittest.TestCase ):
    def tearDown( self ):
super().tearDown()
# clean-up as much as possible GPU memory occupied by PyTorch
gc.collect()
torch.cuda.empty_cache()
@slow
    def test_lm_generate_ctrl( self ):
        model = CTRLLMHeadModel.from_pretrained('ctrl' )
        model.to(torch_device )
        input_ids = torch.tensor(
            [[1_18_59, 0, 16_11, 8]] ,dtype=torch.long ,device=torch_device )  # Legal the president is
        expected_output_ids = [
1_18_59,
0,
16_11,
8,
5,
1_50,
2_64_49,
2,
19,
3_48,
4_69,
3,
25_95,
48,
2_07_40,
24_65_33,
24_65_33,
19,
30,
5,
] # Legal the president is a good guy and I don't want to lose my job. \n \n I have a
        output_ids = model.generate(input_ids ,do_sample=False )
        self.assertListEqual(output_ids[0].tolist() ,expected_output_ids )
| 657
| 1
|
'''simple docstring'''
import os
from glob import glob
import imageio
import torch
import torchvision
import wandb
from img_processing import custom_to_pil, loop_post_process, preprocess, preprocess_vqgan
from loaders import load_vqgan
from PIL import Image
from torch import nn
from transformers import CLIPModel, CLIPTokenizerFast
from utils import get_device, get_timestamp, show_pil
class ProcessorGradientFlow :
"""simple docstring"""
def __init__( self : str ,lowercase__ : str = "cpu" ,lowercase__ : str = "openai/clip-vit-large-patch14" ):
__lowercase = device
__lowercase = CLIPTokenizerFast.from_pretrained(lowercase__ )
__lowercase = [0.4_8_1_4_5_4_6_6, 0.4_5_7_8_2_7_5, 0.4_0_8_2_1_0_7_3]
__lowercase = [0.2_6_8_6_2_9_5_4, 0.2_6_1_3_0_2_5_8, 0.2_7_5_7_7_7_1_1]
__lowercase = torchvision.transforms.Normalize(self.image_mean ,self.image_std )
__lowercase = torchvision.transforms.Resize(2_2_4 )
__lowercase = torchvision.transforms.CenterCrop(2_2_4 )
    def preprocess_img ( self ,images ):
        images = self.resize(images )
        images = self.center_crop(images )
        images = self.normalize(images )
        return images
    def __call__( self ,text=None ,images=None ,**kwargs ):
        encoding = self.tokenizer(text=text ,**kwargs )
        encoding["pixel_values"] = self.preprocess_img(images )
        encoding = {key: value.to(self.device ) for (key, value) in encoding.items()}
        return encoding
class VQGAN_CLIP (nn.Module ):
"""simple docstring"""
    def __init__( self ,iterations=1_0 ,lr=0.0_1 ,vqgan=None ,vqgan_config=None ,vqgan_checkpoint=None ,clip=None ,clip_preprocessor=None ,device=None ,log=False ,save_vector=True ,return_val="image" ,quantize=True ,save_intermediate=False ,show_intermediate=False ,make_grid=False ,):
        super().__init__()
        self.latent = None
        self.device = device if device else get_device()
        if vqgan:
            self.vqgan = vqgan
        else:
            self.vqgan = load_vqgan(self.device ,conf_path=vqgan_config ,ckpt_path=vqgan_checkpoint )
        self.vqgan.eval()
        if clip:
            self.clip = clip
        else:
            self.clip = CLIPModel.from_pretrained('''openai/clip-vit-base-patch32''' )
        self.clip.to(self.device )
        self.clip_preprocessor = ProcessorGradientFlow(device=self.device )
        self.iterations = iterations
        self.lr = lr
        self.log = log
        self.make_grid = make_grid
        self.return_val = return_val
        self.quantize = quantize
        self.latent_dim = self.vqgan.decoder.z_shape
    def make_animation ( self ,input_path=None ,output_path=None ,total_duration=5 ,extend_frames=True ):
        images = []
        if output_path is None:
            output_path = '''./animation.gif'''
        if input_path is None:
            input_path = self.save_path
        paths = sorted(glob(input_path + '''/*''' ) )
        if not len(paths ):
            raise ValueError(
                '''No images found in save path, aborting (did you pass save_intermediate=True to the generate'''
                ''' function?)''' )
        if len(paths ) == 1:
            print('''Only one image found in save path, (did you pass save_intermediate=True to the generate function?)''' )
        frame_duration = total_duration / len(paths )
        durations = [frame_duration] * len(paths )
        if extend_frames:
            durations[0] = 1.5
            durations[-1] = 3
        for file_name in paths:
            if file_name.endswith('''.png''' ):
                images.append(imageio.imread(file_name ) )
        imageio.mimsave(output_path ,images ,duration=durations )
        print(F"gif saved to {output_path}" )
    def _get_latent ( self ,path=None ,img=None ):
        if not (path or img):
            raise ValueError('''Input either path or tensor''' )
        if img is not None:
            raise NotImplementedError
        img = preprocess(Image.open(path ) ,target_image_size=2_5_6 ).to(self.device )
        img = preprocess_vqgan(img )
        z , *_ = self.vqgan.encode(img )
        return z
    def _add_vector ( self ,transform_vector ):
        base_latent = self.latent.detach().requires_grad_()
        trans_latent = base_latent + transform_vector
        if self.quantize:
            z_q , *_ = self.vqgan.quantize(trans_latent )
        else:
            z_q = trans_latent
        return self.vqgan.decode(z_q )
    def _get_clip_similarity ( self ,prompts ,image ,weights=None ):
        clip_inputs = self.clip_preprocessor(text=prompts ,images=image ,return_tensors='''pt''' ,padding=True )
        clip_outputs = self.clip(**clip_inputs )
        similarity_logits = clip_outputs.logits_per_image
        if weights is not None:
            similarity_logits = similarity_logits * weights
        return similarity_logits.sum()
    def _get_CLIP_loss ( self ,pos_prompts ,neg_prompts ,image ):
        pos_logits = self._get_clip_similarity(pos_prompts['''prompts'''] ,image ,weights=(1 / pos_prompts['''weights''']) )
        if neg_prompts:
            neg_logits = self._get_clip_similarity(neg_prompts['''prompts'''] ,image ,weights=neg_prompts['''weights'''] )
        else:
            neg_logits = torch.tensor([1] ,device=self.device )
        loss = -torch.log(pos_logits ) + torch.log(neg_logits )
        return loss
    def _optimize_CLIP ( self ,original_img ,pos_prompts ,neg_prompts ):
        vector = torch.randn_like(self.latent ,requires_grad=True ,device=self.device )
        optim = torch.optim.Adam([vector] ,lr=self.lr )
        for i in range(self.iterations ):
            optim.zero_grad()
            transformed_img = self._add_vector(vector )
            processed_img = loop_post_process(transformed_img )
            clip_loss = self._get_CLIP_loss(pos_prompts ,neg_prompts ,processed_img )
            print('''CLIP loss''' ,clip_loss )
            if self.log:
                wandb.log({'''CLIP Loss''': clip_loss} )
            clip_loss.backward(retain_graph=True )
            optim.step()
            if self.return_val == "image":
                yield custom_to_pil(transformed_img[0] )
            else:
                yield vector
    def _init_logging ( self ,positive_prompts ,negative_prompts ,image_path ):
        wandb.init(reinit=True ,project='''face-editor''' )
        wandb.config.update({'''Positive Prompts''': positive_prompts} )
        wandb.config.update({'''Negative Prompts''': negative_prompts} )
        wandb.config.update({'''lr''': self.lr, '''iterations''': self.iterations} )
        if image_path:
            image = Image.open(image_path )
            image = image.resize((2_5_6, 2_5_6) )
            wandb.log('''Original Image''' ,wandb.Image(image ) )
    def process_prompts ( self ,prompts ):
        if not prompts:
            return []
        processed_prompts = []
        weights = []
        if isinstance(prompts ,str ):
            prompts = [prompt.strip() for prompt in prompts.split('''|''' )]
        for prompt in prompts:
            if isinstance(prompt ,(tuple, list) ):
                processed_prompt = prompt[0]
                weight = float(prompt[1] )
            elif ":" in prompt:
                processed_prompt , weight = prompt.split(''':''' )
                weight = float(weight )
            else:
                processed_prompt = prompt
                weight = 1.0
            processed_prompts.append(processed_prompt )
            weights.append(weight )
        return {
            "prompts": processed_prompts,
            "weights": torch.tensor(weights ,device=self.device ),
        }
    def generate ( self ,pos_prompts ,neg_prompts=None ,image_path=None ,show_intermediate=True ,save_intermediate=False ,show_final=True ,save_final=True ,save_path=None ,):
        if image_path:
            self.latent = self._get_latent(image_path )
        else:
            self.latent = torch.randn(self.latent_dim ,device=self.device )
        if self.log:
            self._init_logging(pos_prompts ,neg_prompts ,image_path )
        assert pos_prompts, "You must provide at least one positive prompt."
        pos_prompts = self.process_prompts(pos_prompts )
        neg_prompts = self.process_prompts(neg_prompts )
        if save_final and save_path is None:
            save_path = os.path.join('''./outputs/''' ,'''_'''.join(pos_prompts['''prompts'''] ) )
            if not os.path.exists(save_path ):
                os.makedirs(save_path )
            else:
                save_path = save_path + '''_''' + get_timestamp()
                os.makedirs(save_path )
            self.save_path = save_path
        original_img = self.vqgan.decode(self.latent )[0]
        if show_intermediate:
            print('''Original Image''' )
            show_pil(custom_to_pil(original_img ) )
        original_img = loop_post_process(original_img )
        for iter, transformed_img in enumerate(self._optimize_CLIP(original_img ,pos_prompts ,neg_prompts ) ):
            if show_intermediate:
                show_pil(transformed_img )
            if save_intermediate:
                transformed_img.save(os.path.join(self.save_path ,F"iter_{iter:03d}.png" ) )
            if self.log:
                wandb.log({'''Image''': wandb.Image(transformed_img )} )
        if show_final:
            show_pil(transformed_img )
        if save_final:
            transformed_img.save(os.path.join(self.save_path ,F"iter_{iter:03d}_final.png" ) )
| 41
|
'''simple docstring'''
import os
import re
import shutil
from argparse import ArgumentParser, Namespace
from datasets.commands import BaseDatasetsCLICommand
from datasets.utils.logging import get_logger
__lowerCamelCase : List[str] = """<<<<<<< This should probably be modified because it mentions: """
__lowerCamelCase : Optional[int] = """=======
>>>>>>>
"""
__lowerCamelCase : List[str] = [
"""TextEncoderConfig""",
"""ByteTextEncoder""",
"""SubwordTextEncoder""",
"""encoder_config""",
"""maybe_build_from_corpus""",
"""manual_dir""",
]
TO_CONVERT = [
# (pattern, replacement)
# Order is important here for some replacements
(r"""tfds\.core""", r"""datasets"""),
(r"""tf\.io\.gfile\.GFile""", r"""open"""),
(r"""tf\.([\w\d]+)""", r"""datasets.Value('\1')"""),
(r"""tfds\.features\.Text\(\)""", r"""datasets.Value('string')"""),
(r"""tfds\.features\.Text\(""", r"""datasets.Value('string'),"""),
(r"""features\s*=\s*tfds.features.FeaturesDict\(""", r"""features=datasets.Features("""),
(r"""tfds\.features\.FeaturesDict\(""", r"""dict("""),
(r"""The TensorFlow Datasets Authors""", r"""The TensorFlow Datasets Authors and the HuggingFace Datasets Authors"""),
(r"""tfds\.""", r"""datasets."""),
(r"""dl_manager\.manual_dir""", r"""self.config.data_dir"""),
(r"""self\.builder_config""", r"""self.config"""),
]
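# A minimal sketch (added, not part of the original script) showing how the
# ordered (pattern, replacement) pairs above are meant to be applied with
# re.sub; order matters because the generic `tfds\.` rule must run last.
# The helper name `_demo_convert` is hypothetical.
# Example: _demo_convert("ds = tfds.features.Text()") -> "ds = datasets.Value('string')"
def _demo_convert(line):
    for pattern, replacement in TO_CONVERT:
        line = re.sub(pattern, replacement, line)
    return line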
def convert_command_factory (args ):
    """simple docstring"""
    return ConvertCommand(args.tfds_path , args.datasets_directory )
class ConvertCommand ( BaseDatasetsCLICommand ):
@staticmethod
    def register_subcommand ( parser : ArgumentParser ) -> None:
        """simple docstring"""
        train_parser = parser.add_parser(
            '''convert''' , help='''Convert a TensorFlow Datasets dataset to a HuggingFace Datasets dataset.''' , )
        train_parser.add_argument(
            '''--tfds_path''' , type=str , required=True , help='''Path to a TensorFlow Datasets folder to convert or a single tfds file to convert.''' , )
        train_parser.add_argument(
            '''--datasets_directory''' , type=str , required=True , help='''Path to the HuggingFace Datasets folder.''' )
        train_parser.set_defaults(func=convert_command_factory )
    def __init__( self , tfds_path : str , datasets_directory : str , *args ) -> None:
        """simple docstring"""
        self._logger = get_logger('''datasets-cli/converting''' )
        self._tfds_path = tfds_path
        self._datasets_directory = datasets_directory
    def run ( self ) -> None:
        """simple docstring"""
        if os.path.isdir(self._tfds_path ):
            abs_tfds_path = os.path.abspath(self._tfds_path )
        elif os.path.isfile(self._tfds_path ):
            abs_tfds_path = os.path.dirname(self._tfds_path )
        else:
            raise ValueError('''--tfds_path is neither a directory nor a file. Please check path.''' )
        abs_datasets_path = os.path.abspath(self._datasets_directory )
self._logger.info(F"""Converting datasets from {abs_tfds_path} to {abs_datasets_path}""" )
        utils_files = []
        with_manual_update = []
        imports_to_builder_map = {}
        if os.path.isdir(self._tfds_path ):
            file_names = os.listdir(abs_tfds_path )
        else:
            file_names = [os.path.basename(self._tfds_path )]
for f_name in file_names:
self._logger.info(F"""Looking at file {f_name}""" )
            input_file = os.path.join(abs_tfds_path , f_name )
            output_file = os.path.join(abs_datasets_path , f_name )
            if not os.path.isfile(input_file ) or "__init__" in f_name or "_test" in f_name or ".py" not in f_name:
self._logger.info('''Skipping file''' )
continue
            with open(input_file , encoding='''utf-8''' ) as f:
                lines = f.readlines()
            out_lines = []
            is_builder = False
            needs_manual_update = False
            tfds_imports = []
for line in lines:
                out_line = line
# Convert imports
if "import tensorflow.compat.v2 as tf" in out_line:
continue
elif "@tfds.core" in out_line:
continue
elif "builder=self" in out_line:
continue
elif "import tensorflow_datasets.public_api as tfds" in out_line:
                    out_line = '''import datasets\n'''
elif "import tensorflow" in out_line:
# order is important here
                    out_line = ''''''
continue
elif "from absl import logging" in out_line:
                    out_line = '''from datasets import logging\n'''
elif "getLogger" in out_line:
                    out_line = out_line.replace('''getLogger''' , '''get_logger''' )
elif any(expression in out_line for expression in TO_HIGHLIGHT ):
                    needs_manual_update = True
                    to_highlight = list(filter(lambda e : e in out_line , TO_HIGHLIGHT ) )
                    out_lines.append(HIGHLIGHT_MESSAGE_PRE + str(to_highlight ) + '''\n''' )
                    out_lines.append(out_line )
                    out_lines.append(HIGHLIGHT_MESSAGE_POST )
continue
else:
for pattern, replacement in TO_CONVERT:
                        out_line = re.sub(pattern , replacement , out_line )
# Take care of saving utilities (to later move them together with main script)
if "tensorflow_datasets" in out_line:
                        match = re.match(R'''from\stensorflow_datasets.*import\s([^\.\r\n]+)''' , out_line )
                        tfds_imports.extend(imp.strip() for imp in match.group(1 ).split(''',''' ) )
                        out_line = '''from . import ''' + match.group(1 )
# Check we have not forget anything
if "tf." in out_line or "tfds." in out_line or "tensorflow_datasets" in out_line:
raise ValueError(F"""Error converting {out_line.strip()}""" )
if "GeneratorBasedBuilder" in out_line or "BeamBasedBuilder" in out_line:
                    is_builder = True
                out_lines.append(out_line )
if is_builder or "wmt" in f_name:
# We create a new directory for each dataset
                dir_name = f_name.replace('''.py''' , '''''' )
                output_dir = os.path.join(abs_datasets_path , dir_name )
                output_file = os.path.join(output_dir , f_name )
                os.makedirs(output_dir , exist_ok=True )
self._logger.info(F"""Adding directory {output_dir}""" )
imports_to_builder_map.update({imp: output_dir for imp in tfds_imports} )
else:
# Utilities will be moved at the end
                utils_files.append(output_file )
if needs_manual_update:
                with_manual_update.append(output_file )
            with open(output_file , '''w''' , encoding='''utf-8''' ) as f:
                f.writelines(out_lines )
self._logger.info(F"""Converted in {output_file}""" )
for utils_file in utils_files:
try:
                f_name = os.path.basename(utils_file )
                dest_folder = imports_to_builder_map[f_name.replace('''.py''' , '''''' )]
                self._logger.info(F"""Moving {dest_folder} to {utils_file}""" )
                shutil.copy(utils_file , dest_folder )
except KeyError:
self._logger.error(F"""Cannot find destination folder for {utils_file}. Please copy manually.""" )
if with_manual_update:
for file_path in with_manual_update:
self._logger.warning(
F"""You need to manually update file {file_path} to remove configurations using 'TextEncoderConfig'.""" )
| 501
| 0
|
import unittest
import torch
from torch import nn
from diffusers.models.activations import get_activation
class __lowerCamelCase ( unittest.TestCase ):
"""simple docstring"""
    def test_swish( self ):
        '''simple docstring'''
        act = get_activation("swish" )
        self.assertIsInstance(act , nn.SiLU )
        self.assertEqual(act(torch.tensor(-100 , dtype=torch.float32 ) ).item() , 0 )
        self.assertNotEqual(act(torch.tensor(-1 , dtype=torch.float32 ) ).item() , 0 )
        self.assertEqual(act(torch.tensor(0 , dtype=torch.float32 ) ).item() , 0 )
        self.assertEqual(act(torch.tensor(20 , dtype=torch.float32 ) ).item() , 20 )
    def test_silu( self ):
        '''simple docstring'''
        act = get_activation("silu" )
        self.assertIsInstance(act , nn.SiLU )
        self.assertEqual(act(torch.tensor(-100 , dtype=torch.float32 ) ).item() , 0 )
        self.assertNotEqual(act(torch.tensor(-1 , dtype=torch.float32 ) ).item() , 0 )
        self.assertEqual(act(torch.tensor(0 , dtype=torch.float32 ) ).item() , 0 )
        self.assertEqual(act(torch.tensor(20 , dtype=torch.float32 ) ).item() , 20 )
    def test_mish( self ):
        '''simple docstring'''
        act = get_activation("mish" )
        self.assertIsInstance(act , nn.Mish )
        self.assertEqual(act(torch.tensor(-200 , dtype=torch.float32 ) ).item() , 0 )
        self.assertNotEqual(act(torch.tensor(-1 , dtype=torch.float32 ) ).item() , 0 )
        self.assertEqual(act(torch.tensor(0 , dtype=torch.float32 ) ).item() , 0 )
        self.assertEqual(act(torch.tensor(20 , dtype=torch.float32 ) ).item() , 20 )
    def test_gelu( self ):
        '''simple docstring'''
        act = get_activation("gelu" )
        self.assertIsInstance(act , nn.GELU )
        self.assertEqual(act(torch.tensor(-100 , dtype=torch.float32 ) ).item() , 0 )
        self.assertNotEqual(act(torch.tensor(-1 , dtype=torch.float32 ) ).item() , 0 )
        self.assertEqual(act(torch.tensor(0 , dtype=torch.float32 ) ).item() , 0 )
        self.assertEqual(act(torch.tensor(20 , dtype=torch.float32 ) ).item() , 20 )
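# --- Added sketch (not part of the original tests): get_activation is a plain
# string-to-module lookup, so activation layers can be configured by name:
if __name__ == "__main__":
    import torch
    from diffusers.models.activations import get_activation

    act = get_activation("gelu")
    print(act(torch.linspace(-2, 2, 5)))  # GELU applied elementwise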
| 601
|
from math import sqrt
def sum_of_divisors ( n: int ) -> int:
    '''simple docstring'''
    total = 0
    for i in range(1 , int(sqrt(n ) + 1 ) ):
        if n % i == 0 and i != sqrt(n ):
            total += i + n // i
        elif i == sqrt(n ):
            total += i
    return total - n
def solution ( limit: int = 1_0000 ) -> int:
    '''simple docstring'''
    total = sum(
        i
        for i in range(1 , limit )
        if sum_of_divisors(sum_of_divisors(i ) ) == i and sum_of_divisors(i ) != i )
    return total
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
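# Worked example (added, not in the original file): 220 and 284 form the
# classic amicable pair, since sum_of_divisors(220) == 284 and
# sum_of_divisors(284) == 220; solution(300) therefore counts both and
# returns 220 + 284 = 504.
# >>> sum_of_divisors(220), sum_of_divisors(284)
# (284, 220)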
| 601
| 1
|
import argparse
from pathlib import Path
from typing import Dict, OrderedDict, Tuple
import torch
from audiocraft.models import MusicGen
from transformers import (
AutoFeatureExtractor,
AutoTokenizer,
EncodecModel,
MusicgenDecoderConfig,
MusicgenForConditionalGeneration,
MusicgenProcessor,
    T5EncoderModel,
)
from transformers.models.musicgen.modeling_musicgen import MusicgenForCausalLM
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
EXPECTED_MISSING_KEYS = ["""model.decoder.embed_positions.weights"""]
def rename_keys (name ) -> str:
    if "emb" in name:
        name = name.replace('''emb''' , '''model.decoder.embed_tokens''' )
    if "transformer" in name:
        name = name.replace('''transformer''' , '''model.decoder''' )
    if "cross_attention" in name:
        name = name.replace('''cross_attention''' , '''encoder_attn''' )
    if "linear1" in name:
        name = name.replace('''linear1''' , '''fc1''' )
    if "linear2" in name:
        name = name.replace('''linear2''' , '''fc2''' )
    if "norm1" in name:
        name = name.replace('''norm1''' , '''self_attn_layer_norm''' )
    if "norm_cross" in name:
        name = name.replace('''norm_cross''' , '''encoder_attn_layer_norm''' )
    if "norm2" in name:
        name = name.replace('''norm2''' , '''final_layer_norm''' )
    if "out_norm" in name:
        name = name.replace('''out_norm''' , '''model.decoder.layer_norm''' )
    if "linears" in name:
        name = name.replace('''linears''' , '''lm_heads''' )
    if "condition_provider.conditioners.description.output_proj" in name:
        name = name.replace('''condition_provider.conditioners.description.output_proj''' , '''enc_to_dec_proj''' )
    return name
def rename_state_dict (state_dict , hidden_size ) -> Tuple[Dict, Dict]:
    keys = list(state_dict.keys() )
    enc_dec_proj_state_dict = {}
    for key in keys:
        val = state_dict.pop(key )
        key = rename_keys(key )
        if "in_proj_weight" in key:
            # split fused qkv proj
            state_dict[key.replace('''in_proj_weight''' , '''q_proj.weight''' )] = val[:hidden_size, :]
            state_dict[key.replace('''in_proj_weight''' , '''k_proj.weight''' )] = val[hidden_size : 2 * hidden_size, :]
            state_dict[key.replace('''in_proj_weight''' , '''v_proj.weight''' )] = val[-hidden_size:, :]
        elif "enc_to_dec_proj" in key:
            enc_dec_proj_state_dict[key[len('''enc_to_dec_proj.''' ) :]] = val
        else:
            state_dict[key] = val
    return state_dict, enc_dec_proj_state_dict
def decoder_config_from_checkpoint (checkpoint ) -> MusicgenDecoderConfig:
    if checkpoint == "small":
        # default config values
        hidden_size = 1024
        num_hidden_layers = 24
        num_attention_heads = 16
    elif checkpoint == "medium":
        hidden_size = 1536
        num_hidden_layers = 48
        num_attention_heads = 24
    elif checkpoint == "large":
        hidden_size = 2048
        num_hidden_layers = 48
        num_attention_heads = 32
    else:
        raise ValueError(F"""Checkpoint should be one of `['small', 'medium', 'large']`, got {checkpoint}.""" )
    config = MusicgenDecoderConfig(
        hidden_size=hidden_size , ffn_dim=hidden_size * 4 , num_hidden_layers=num_hidden_layers , num_attention_heads=num_attention_heads , )
    return config
@torch.no_grad()
def convert_musicgen_checkpoint (checkpoint , pytorch_dump_folder=None , repo_id=None , device="cpu" ) -> None:
    fairseq_model = MusicGen.get_pretrained(checkpoint , device=device )
    decoder_config = decoder_config_from_checkpoint(checkpoint )
    decoder_state_dict = fairseq_model.lm.state_dict()
    decoder_state_dict, enc_dec_proj_state_dict = rename_state_dict(
        decoder_state_dict , hidden_size=decoder_config.hidden_size )
    text_encoder = T5EncoderModel.from_pretrained('''t5-base''' )
    audio_encoder = EncodecModel.from_pretrained('''facebook/encodec_32khz''' )
    decoder = MusicgenForCausalLM(decoder_config ).eval()
    # load all decoder weights - expect that we'll be missing embeddings and enc-dec projection
    missing_keys, unexpected_keys = decoder.load_state_dict(decoder_state_dict , strict=False )
    for key in missing_keys.copy():
        if key.startswith(('''text_encoder''', '''audio_encoder''') ) or key in EXPECTED_MISSING_KEYS:
            missing_keys.remove(key )
    if len(missing_keys ) > 0:
        raise ValueError(F"""Missing key(s) in state_dict: {missing_keys}""" )
    if len(unexpected_keys ) > 0:
        raise ValueError(F"""Unexpected key(s) in state_dict: {unexpected_keys}""" )
    # init the composite model
    model = MusicgenForConditionalGeneration(text_encoder=text_encoder , audio_encoder=audio_encoder , decoder=decoder )
    # load the pre-trained enc-dec projection (from the decoder state dict)
    model.enc_to_dec_proj.load_state_dict(enc_dec_proj_state_dict )
    # check we can do a forward pass
    input_ids = torch.arange(0 , 8 , dtype=torch.long ).reshape(2 , -1 )
    decoder_input_ids = input_ids.reshape(2 * 4 , -1 )
    with torch.no_grad():
        logits = model(input_ids=input_ids , decoder_input_ids=decoder_input_ids ).logits
    if logits.shape != (8, 1, 2048):
        raise ValueError('''Incorrect shape for logits''' )
    # now construct the processor
    tokenizer = AutoTokenizer.from_pretrained('''t5-base''' )
    feature_extractor = AutoFeatureExtractor.from_pretrained('''facebook/encodec_32khz''' , padding_side='''left''' )
    processor = MusicgenProcessor(feature_extractor=feature_extractor , tokenizer=tokenizer )
    # set the appropriate bos/pad token ids
    model.generation_config.decoder_start_token_id = 2048
    model.generation_config.pad_token_id = 2048
    # set other default generation config params
    model.generation_config.max_length = int(30 * audio_encoder.config.frame_rate )
    model.generation_config.do_sample = True
    model.generation_config.guidance_scale = 3.0
    if pytorch_dump_folder is not None:
        Path(pytorch_dump_folder ).mkdir(exist_ok=True )
        logger.info(F"""Saving model {checkpoint} to {pytorch_dump_folder}""" )
        model.save_pretrained(pytorch_dump_folder )
        processor.save_pretrained(pytorch_dump_folder )
    if repo_id:
        logger.info(F"""Pushing model {checkpoint} to {repo_id}""" )
        model.push_to_hub(repo_id )
        processor.push_to_hub(repo_id )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--checkpoint""",
default="""small""",
type=str,
help="""Checkpoint size of the MusicGen model you'd like to convert. Can be one of: `['small', 'medium', 'large']`.""",
)
parser.add_argument(
"""--pytorch_dump_folder""",
required=True,
default=None,
type=str,
help="""Path to the output PyTorch model directory.""",
)
parser.add_argument(
"""--push_to_hub""", default=None, type=str, help="""Where to upload the converted model on the 🤗 hub."""
)
parser.add_argument(
"""--device""", default="""cpu""", type=str, help="""Torch device to run the conversion, either cpu or cuda."""
)
    args = parser.parse_args()
convert_musicgen_checkpoint(args.checkpoint, args.pytorch_dump_folder, args.push_to_hub)
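# --- Added example invocation (not part of the original file); the script
# file name and dump folder are assumptions:
#
#   python convert_musicgen_transformers.py --checkpoint small --pytorch_dump_folder ./musicgen-small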
| 33
|
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNet1DModel
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS, UNCONDITIONAL_AUDIO_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class DanceDiffusionPipelineFastTests (PipelineTesterMixin , unittest.TestCase ):
    '''simple docstring'''
    pipeline_class = DanceDiffusionPipeline
    params = UNCONDITIONAL_AUDIO_GENERATION_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {
        """callback""",
        """latents""",
        """callback_steps""",
        """output_type""",
        """num_images_per_prompt""",
    }
    batch_params = UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS
    test_attention_slicing = False
    test_cpu_offload = False
    def get_dummy_components( self ):
        torch.manual_seed(0 )
        unet = UNet1DModel(
            block_out_channels=(32, 32, 64) , extra_in_channels=16 , sample_size=5_12 , sample_rate=1_60_00 , in_channels=2 , out_channels=2 , flip_sin_to_cos=True , use_timestep_embedding=False , time_embedding_type="fourier" , mid_block_type="UNetMidBlock1D" , down_block_types=("DownBlock1DNoSkip", "DownBlock1D", "AttnDownBlock1D") , up_block_types=("AttnUpBlock1D", "UpBlock1D", "UpBlock1DNoSkip") , )
        scheduler = IPNDMScheduler()
        components = {
            "unet": unet,
            "scheduler": scheduler,
        }
        return components
    def get_dummy_inputs( self , device , seed=0 ):
        if str(device ).startswith("mps" ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        inputs = {
            "batch_size": 1,
            "generator": generator,
            "num_inference_steps": 4,
        }
        return inputs
    def test_dance_diffusion( self ):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        pipe = DanceDiffusionPipeline(**components )
        pipe = pipe.to(device )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs(device )
        output = pipe(**inputs )
        audio = output.audios
        audio_slice = audio[0, -3:, -3:]
        assert audio.shape == (1, 2, components["unet"].sample_size)
        expected_slice = np.array([-0.7265, 1.0000, -0.8388, 0.1175, 0.9498, -1.0000] )
        assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1e-2
    @skip_mps
    def test_save_load_local( self ):
        return super().test_save_load_local()
    @skip_mps
    def test_dict_tuple_outputs_equivalent( self ):
        return super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3 )
    @skip_mps
    def test_save_load_optional_components( self ):
        return super().test_save_load_optional_components()
    @skip_mps
    def test_attention_slicing_forward_pass( self ):
        return super().test_attention_slicing_forward_pass()
    def test_inference_batch_single_identical( self ):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
@slow
@require_torch_gpu
class PipelineIntegrationTests (unittest.TestCase ):
    '''simple docstring'''
    def tearDown( self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
    def test_dance_diffusion( self ):
        device = torch_device
        pipe = DanceDiffusionPipeline.from_pretrained("harmonai/maestro-150k" )
        pipe = pipe.to(device )
        pipe.set_progress_bar_config(disable=None )
        generator = torch.manual_seed(0 )
        output = pipe(generator=generator , num_inference_steps=1_00 , audio_length_in_s=4.096 )
        audio = output.audios
        audio_slice = audio[0, -3:, -3:]
        assert audio.shape == (1, 2, pipe.unet.sample_size)
        expected_slice = np.array([-0.0192, -0.0231, -0.0318, -0.0059, 0.0002, -0.0020] )
        assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1e-2
    def test_dance_diffusion_fp16( self ):
        device = torch_device
        pipe = DanceDiffusionPipeline.from_pretrained("harmonai/maestro-150k" , torch_dtype=torch.float16 )
        pipe = pipe.to(device )
        pipe.set_progress_bar_config(disable=None )
        generator = torch.manual_seed(0 )
        output = pipe(generator=generator , num_inference_steps=1_00 , audio_length_in_s=4.096 )
        audio = output.audios
        audio_slice = audio[0, -3:, -3:]
        assert audio.shape == (1, 2, pipe.unet.sample_size)
        expected_slice = np.array([-0.0367, -0.0488, -0.0771, -0.0525, -0.0444, -0.0341] )
        assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1e-2
| 138
| 0
|
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
EulerAncestralDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionPanoramaPipeline,
    UNet2DConditionModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
@skip_mps
class StableDiffusionPanoramaPipelineFastTests ( PipelineLatentTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    pipeline_class = StableDiffusionPanoramaPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    def get_dummy_components( self ):
        """simple docstring"""
        torch.manual_seed(0 )
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64) , layers_per_block=1 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=32 , )
        scheduler = DDIMScheduler()
        torch.manual_seed(0 )
        vae = AutoencoderKL(
            block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , )
        torch.manual_seed(0 )
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
        text_encoder = CLIPTextModel(text_encoder_config )
        tokenizer = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
        components = {
            'unet': unet,
            'scheduler': scheduler,
            'vae': vae,
            'text_encoder': text_encoder,
            'tokenizer': tokenizer,
            'safety_checker': None,
            'feature_extractor': None,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        generator = torch.manual_seed(seed)
        inputs = {
            "prompt": "a photo of the dolomites",
            "generator": generator,
            # Setting height and width to None to prevent OOMs on CPU.
            "height": None,
            "width": None,
            "num_inference_steps": 1,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs
    def test_stable_diffusion_panorama_default_case(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionPanoramaPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.6186, 0.5374, 0.4915, 0.4135, 0.4114, 0.4563, 0.5128, 0.4977, 0.4757])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    def test_inference_batch_consistent(self):
        super().test_inference_batch_consistent(batch_sizes=[1, 2])

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(batch_size=2, expected_max_diff=3.25e-3)
    def test_stable_diffusion_panorama_negative_prompt(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionPanoramaPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        negative_prompt = "french fries"
        output = sd_pipe(**inputs, negative_prompt=negative_prompt)
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.6187, 0.5375, 0.4915, 0.4136, 0.4114, 0.4563, 0.5128, 0.4976, 0.4757])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    def test_stable_diffusion_panorama_views_batch(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionPanoramaPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        output = sd_pipe(**inputs, view_batch_size=2)
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.6187, 0.5375, 0.4915, 0.4136, 0.4114, 0.4563, 0.5128, 0.4976, 0.4757])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    def test_stable_diffusion_panorama_euler(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        components["scheduler"] = EulerAncestralDiscreteScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear"
        )
        sd_pipe = StableDiffusionPanoramaPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.4024, 0.6510, 0.4901, 0.5378, 0.5813, 0.5622, 0.4795, 0.4467, 0.4952])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    def test_stable_diffusion_panorama_pndm(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        components["scheduler"] = PNDMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", skip_prk_steps=True
        )
        sd_pipe = StableDiffusionPanoramaPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.6391, 0.6291, 0.4861, 0.5134, 0.5552, 0.4578, 0.5032, 0.5023, 0.4539])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
@slow
@require_torch_gpu
class StableDiffusionPanoramaNightlyTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def get_inputs(self, seed=0):
        generator = torch.manual_seed(seed)
        inputs = {
            "prompt": "a photo of the dolomites",
            "generator": generator,
            "num_inference_steps": 3,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs
    def test_stable_diffusion_panorama_default(self):
        model_ckpt = "stabilityai/stable-diffusion-2-base"
        scheduler = DDIMScheduler.from_pretrained(model_ckpt, subfolder="scheduler")
        pipe = StableDiffusionPanoramaPipeline.from_pretrained(model_ckpt, scheduler=scheduler, safety_checker=None)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        inputs = self.get_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()
        assert image.shape == (1, 512, 2048, 3)
        expected_slice = np.array(
            [0.36968392, 0.27025372, 0.32446766, 0.28379387, 0.36363274, 0.30733347, 0.27100027, 0.27054125, 0.25536096]
        )
        assert np.abs(expected_slice - image_slice).max() < 1e-2
    def test_stable_diffusion_panorama_k_lms(self):
        pipe = StableDiffusionPanoramaPipeline.from_pretrained(
            "stabilityai/stable-diffusion-2-base", safety_checker=None
        )
        pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        inputs = self.get_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()
        assert image.shape == (1, 512, 2048, 3)
        expected_slice = np.array([[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]])
        assert np.abs(expected_slice - image_slice).max() < 1e-3
    def test_stable_diffusion_panorama_intermediate_state(self):
        number_of_steps = 0

        def callback_fn(step: int, timestep: int, latents: torch.FloatTensor) -> None:
            callback_fn.has_been_called = True
            nonlocal number_of_steps
            number_of_steps += 1
            if step == 1:
                latents = latents.detach().cpu().numpy()
                assert latents.shape == (1, 4, 64, 256)
                latents_slice = latents[0, -3:, -3:, -1]
                expected_slice = np.array(
                    [0.18681869, 0.33907816, 0.5361276, 0.14432865, -0.02856611, -0.73941123, 0.23397987, 0.47322682, -0.37823164]
                )
                assert np.abs(latents_slice.flatten() - expected_slice).max() < 5e-2
            elif step == 2:
                latents = latents.detach().cpu().numpy()
                assert latents.shape == (1, 4, 64, 256)
                latents_slice = latents[0, -3:, -3:, -1]
                expected_slice = np.array(
                    [0.18539645, 0.33987248, 0.5378559, 0.14437142, -0.02455261, -0.7338317, 0.23990755, 0.47356272, -0.3786505]
                )
                assert np.abs(latents_slice.flatten() - expected_slice).max() < 5e-2

        callback_fn.has_been_called = False
        model_ckpt = "stabilityai/stable-diffusion-2-base"
        scheduler = DDIMScheduler.from_pretrained(model_ckpt, subfolder="scheduler")
        pipe = StableDiffusionPanoramaPipeline.from_pretrained(model_ckpt, scheduler=scheduler, safety_checker=None)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        inputs = self.get_inputs()
        pipe(**inputs, callback=callback_fn, callback_steps=1)
        assert callback_fn.has_been_called
        assert number_of_steps == 3
    def test_stable_diffusion_panorama_pipeline_with_sequential_cpu_offloading(self):
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()
        model_ckpt = "stabilityai/stable-diffusion-2-base"
        scheduler = DDIMScheduler.from_pretrained(model_ckpt, subfolder="scheduler")
        pipe = StableDiffusionPanoramaPipeline.from_pretrained(model_ckpt, scheduler=scheduler, safety_checker=None)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing(1)
        pipe.enable_sequential_cpu_offload()
        inputs = self.get_inputs()
        _ = pipe(**inputs)
        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 5.5 GB is allocated
        assert mem_bytes < 5.5 * 10**9
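# The nightly tests above double as a usage recipe: the panorama pipeline denoises a
# wide latent through overlapping views (hence the view_batch_size knob exercised in
# the fast suite) and stitches them into one image, which is why a default request
# yields a (1, 512, 2048, 3) array. A condensed sketch (same checkpoint as the tests;
# device and prompt are illustrative, not part of the original file):
#
#     import torch
#     from diffusers import DDIMScheduler, StableDiffusionPanoramaPipeline
#
#     model_ckpt = "stabilityai/stable-diffusion-2-base"
#     scheduler = DDIMScheduler.from_pretrained(model_ckpt, subfolder="scheduler")
#     pipe = StableDiffusionPanoramaPipeline.from_pretrained(model_ckpt, scheduler=scheduler).to("cuda")
#     image = pipe("a photo of the dolomites", height=512, width=2048,
#                  generator=torch.manual_seed(0)).images[0]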
| 707
|
from __future__ import annotations
from dataclasses import dataclass
@dataclass
class TreeNode:
    data: float
    left: TreeNode | None = None
    right: TreeNode | None = None
def is_binary_search_tree(node: TreeNode | None) -> bool:
    # Validation
    def is_valid_tree(node: TreeNode | None) -> bool:
        if node is None:
            return True
        if not isinstance(node, TreeNode):
            return False
        try:
            float(node.data)
        except (TypeError, ValueError):
            return False
        return is_valid_tree(node.left) and is_valid_tree(node.right)

    if not is_valid_tree(node):
        raise ValueError("Each node should be type of TreeNode and data should be float.")

    def is_binary_search_tree_recursive_check(node: TreeNode | None, left_bound: float, right_bound: float) -> bool:
        if node is None:
            return True
        return (
            left_bound < node.data < right_bound
            and is_binary_search_tree_recursive_check(node.left, left_bound, node.data)
            and is_binary_search_tree_recursive_check(node.right, node.data, right_bound)
        )

    return is_binary_search_tree_recursive_check(node, -float("inf"), float("inf"))
if __name__ == "__main__":
import doctest
doctest.testmod()
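# A minimal usage sketch (relies only on the definitions above; values are illustrative):
#
#     root = TreeNode(2.0, TreeNode(1.0), TreeNode(3.0))
#     print(is_binary_search_tree(root))     # True
#     swapped = TreeNode(2.0, TreeNode(3.0), TreeNode(1.0))
#     print(is_binary_search_tree(swapped))  # False: 3.0 sits in the left subtree of 2.0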
| 534
| 0
|
from __future__ import annotations
from typing import Any
class ContainsLoopError(Exception):
    pass


class Node:
    def __init__(self, data: Any) -> None:
        self.data: Any = data
        self.next_node: Node | None = None

    def __iter__(self):
        node = self
        visited = []
        while node:
            if node in visited:
                raise ContainsLoopError
            visited.append(node)
            yield node.data
            node = node.next_node

    @property
    def has_loop(self) -> bool:
        try:
            list(self)
            return False
        except ContainsLoopError:
            return True
if __name__ == "__main__":
UpperCAmelCase_ = Node(1)
UpperCAmelCase_ = Node(2)
UpperCAmelCase_ = Node(3)
UpperCAmelCase_ = Node(4)
print(root_node.has_loop) # False
UpperCAmelCase_ = root_node.next_node
print(root_node.has_loop) # True
UpperCAmelCase_ = Node(5)
UpperCAmelCase_ = Node(6)
UpperCAmelCase_ = Node(5)
UpperCAmelCase_ = Node(6)
print(root_node.has_loop) # False
UpperCAmelCase_ = Node(1)
print(root_node.has_loop) # False
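# Design note: `has_loop` above remembers every visited node in a Python list, which is
# O(n^2) time and O(n) extra space. Floyd's tortoise-and-hare cycle detection is the
# usual O(n)-time, O(1)-space alternative; a minimal sketch against the Node class
# above (an illustration added here, not part of the original module):
def has_loop_floyd(head: Node) -> bool:
    slow = fast = head
    while fast is not None and fast.next_node is not None:
        slow = slow.next_node  # one step
        fast = fast.next_node.next_node  # two steps
        if slow is fast:  # the pointers can only meet inside a cycle
            return True
    return False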
| 2
|
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple
from transformers import AddedToken, BatchEncoding, ByT5Tokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
if is_torch_available():
    FRAMEWORK = "pt"
elif is_tf_available():
    FRAMEWORK = "tf"
else:
    FRAMEWORK = "jax"
class ByT5TokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = ByT5Tokenizer
    test_rust_tokenizer = False

    def setUp(self) -> None:
        super().setUp()
        tokenizer = ByT5Tokenizer()
        tokenizer.save_pretrained(self.tmpdirname)

    @cached_property
    def t5_base_tokenizer(self):
        return ByT5Tokenizer.from_pretrained("google/byt5-small")

    def get_tokenizer(self, **kwargs) -> ByT5Tokenizer:
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)
    def get_clean_sequence(self, tokenizer, with_prefix_space=False, max_length=20, min_length=5) -> Tuple[str, list]:
        toks = []
        for i in range(len(tokenizer)):
            try:
                tok = tokenizer.decode([i], clean_up_tokenization_spaces=False)
            except UnicodeDecodeError:
                pass
            toks.append((i, tok))
        toks = list(filter(lambda t: re.match(r"^[ a-zA-Z]+$", t[1]), toks))
        toks = list(filter(lambda t: [t[0]] == tokenizer.encode(t[1], add_special_tokens=False), toks))
        if max_length is not None and len(toks) > max_length:
            toks = toks[:max_length]
        if min_length is not None and len(toks) < min_length and len(toks) > 0:
            while len(toks) < min_length:
                toks = toks + toks
        # toks_str = [t[1] for t in toks]
        toks_ids = [t[0] for t in toks]
        # Ensure consistency
        output_txt = tokenizer.decode(toks_ids, clean_up_tokenization_spaces=False)
        if " " not in output_txt and len(toks_ids) > 1:
            output_txt = (
                tokenizer.decode([toks_ids[0]], clean_up_tokenization_spaces=False)
                + " "
                + tokenizer.decode(toks_ids[1:], clean_up_tokenization_spaces=False)
            )
        if with_prefix_space:
            output_txt = " " + output_txt
        output_ids = tokenizer.encode(output_txt, add_special_tokens=False)
        return output_txt, output_ids
    def test_eos_treatment(self):
        tokenizer = self.t5_base_tokenizer
        batch_with_eos_added = tokenizer(["hi</s>", "I went to the gym</s>", "</s>"])
        batch_without_eos_added = tokenizer(["hi", "I went to the gym", ""])
        self.assertListEqual(batch_with_eos_added["input_ids"], batch_without_eos_added["input_ids"])
    def test_multibytes_char(self):
        tokenizer = self.t5_base_tokenizer
        src_text = "Unicode €."
        encoded = tokenizer(src_text)
        encoded_ids = [88, 113, 108, 102, 114, 103, 104, 35, 229, 133, 175, 49, 1]
        self.assertEqual(encoded["input_ids"], encoded_ids)
        # decoding
        decoded = tokenizer.decode(encoded_ids)
        self.assertEqual(decoded, "Unicode €.</s>")
        encoded = tokenizer("e è é ê ë")
        encoded_ids = [104, 35, 198, 171, 35, 198, 172, 35, 198, 173, 35, 198, 174, 1]
        self.assertEqual(encoded["input_ids"], encoded_ids)
        # decoding
        decoded = tokenizer.decode(encoded_ids)
        self.assertEqual(decoded, "e è é ê ë</s>")
        # encode/decode, but with `encode` instead of `__call__`
        self.assertEqual(tokenizer.decode(tokenizer.encode("e è é ê ë")), "e è é ê ë</s>")
    def test_prepare_batch_integration(self):
        tokenizer = self.t5_base_tokenizer
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        # fmt: off
        expected_src_tokens = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 1, 0]
        # fmt: on
        batch = tokenizer(src_text, padding=True, return_tensors=FRAMEWORK)
        self.assertIsInstance(batch, BatchEncoding)
        if FRAMEWORK != "jax":
            result = list(batch.input_ids.numpy()[0])
        else:
            result = list(batch.input_ids.tolist()[0])
        self.assertListEqual(expected_src_tokens, result)
        self.assertEqual((2, 37), batch.input_ids.shape)
        self.assertEqual((2, 37), batch.attention_mask.shape)
    def test_empty_target_text(self):
        tokenizer = self.t5_base_tokenizer
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        batch = tokenizer(src_text, padding=True, return_tensors=FRAMEWORK)
        # check if input_ids are returned and no decoder_input_ids
        self.assertIn("input_ids", batch)
        self.assertIn("attention_mask", batch)
        self.assertNotIn("decoder_input_ids", batch)
        self.assertNotIn("decoder_attention_mask", batch)
    def test_max_length_integration(self):
        tokenizer = self.t5_base_tokenizer
        tgt_text = [
            "Summary of the text.",
            "Another summary.",
        ]
        targets = tokenizer(
            text_target=tgt_text, max_length=32, padding="max_length", truncation=True, return_tensors=FRAMEWORK
        )
        self.assertEqual(32, targets["input_ids"].shape[1])
    def test_eos_in_input(self):
        tokenizer = self.t5_base_tokenizer
        src_text = ["A long paragraph for summarization. </s>"]
        tgt_text = ["Summary of the text. </s>"]
        # fmt: off
        expected_src_tokens = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 35, 1]
        expected_tgt_tokens = [86, 120, 112, 112, 100, 117, 124, 35, 114, 105, 35, 119, 107, 104, 35, 119, 104, 123, 119, 49, 35, 1]
        # fmt: on
        batch = tokenizer(src_text, text_target=tgt_text)
        self.assertEqual(expected_src_tokens, batch["input_ids"][0])
        self.assertEqual(expected_tgt_tokens, batch["labels"][0])
    def test_save_and_load_tokenizer(self):
        # safety check on max_len default value so we are sure the test works
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                self.assertNotEqual(tokenizer.model_max_length, 42)
        # Now let's start the test
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                # Isolate this from the other tests because we save additional tokens/etc
                tmpdirname = tempfile.mkdtemp()
                sample_text = " He is very happy, UNwant\u00E9d,running"
                before_tokens = tokenizer.encode(sample_text, add_special_tokens=False)
                tokenizer.save_pretrained(tmpdirname)
                after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname)
                after_tokens = after_tokenizer.encode(sample_text, add_special_tokens=False)
                self.assertListEqual(before_tokens, after_tokens)
                shutil.rmtree(tmpdirname)
        tokenizers = self.get_tokenizers(model_max_length=42)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                # Isolate this from the other tests because we save additional tokens/etc
                tmpdirname = tempfile.mkdtemp()
                sample_text = " He is very happy, UNwant\u00E9d,running"
                tokenizer.add_tokens(["bim", "bambam"])
                additional_special_tokens = tokenizer.additional_special_tokens
                additional_special_tokens.append("new_additional_special_token")
                tokenizer.add_special_tokens({"additional_special_tokens": additional_special_tokens})
                before_tokens = tokenizer.encode(sample_text, add_special_tokens=False)
                tokenizer.save_pretrained(tmpdirname)
                after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname)
                after_tokens = after_tokenizer.encode(sample_text, add_special_tokens=False)
                self.assertListEqual(before_tokens, after_tokens)
                self.assertIn("new_additional_special_token", after_tokenizer.additional_special_tokens)
                self.assertEqual(after_tokenizer.model_max_length, 42)
                tokenizer = tokenizer.__class__.from_pretrained(tmpdirname, model_max_length=43)
                self.assertEqual(tokenizer.model_max_length, 43)
                shutil.rmtree(tmpdirname)
    def test_special_tokens_initialization_with_non_empty_additional_special_tokens(self):
        tokenizer_list = []
        if self.test_slow_tokenizer:
            tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()))
        if self.test_rust_tokenizer:
            tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()))
        for tokenizer_class, tokenizer_utils in tokenizer_list:
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer_utils.save_pretrained(tmp_dir)
                with open(os.path.join(tmp_dir, "special_tokens_map.json"), encoding="utf-8") as json_file:
                    special_tokens_map = json.load(json_file)
                with open(os.path.join(tmp_dir, "tokenizer_config.json"), encoding="utf-8") as json_file:
                    tokenizer_config = json.load(json_file)
                added_tokens_extra_ids = [f"<extra_id_{i}>" for i in range(125)]
                special_tokens_map["additional_special_tokens"] = added_tokens_extra_ids + [
                    "an_additional_special_token"
                ]
                tokenizer_config["additional_special_tokens"] = added_tokens_extra_ids + [
                    "an_additional_special_token"
                ]
                with open(os.path.join(tmp_dir, "special_tokens_map.json"), "w", encoding="utf-8") as outfile:
                    json.dump(special_tokens_map, outfile)
                with open(os.path.join(tmp_dir, "tokenizer_config.json"), "w", encoding="utf-8") as outfile:
                    json.dump(tokenizer_config, outfile)
                # the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
                # into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
                # "special_tokens_map.json" files
                tokenizer_without_change_in_init = tokenizer_class.from_pretrained(
                    tmp_dir,
                )
                self.assertIn(
                    "an_additional_special_token", tokenizer_without_change_in_init.additional_special_tokens
                )
                # self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab
                self.assertEqual(
                    ["an_additional_special_token"],
                    tokenizer_without_change_in_init.convert_ids_to_tokens(
                        tokenizer_without_change_in_init.convert_tokens_to_ids(["an_additional_special_token"])
                    ),
                )
                # Now we test that we can change the value of additional_special_tokens in the from_pretrained
                new_added_tokens = added_tokens_extra_ids + [AddedToken("a_new_additional_special_token", lstrip=True)]
                tokenizer = tokenizer_class.from_pretrained(
                    tmp_dir, additional_special_tokens=new_added_tokens,
                )
                self.assertIn("a_new_additional_special_token", tokenizer.additional_special_tokens)
                self.assertEqual(
                    ["a_new_additional_special_token"],
                    tokenizer.convert_ids_to_tokens(
                        tokenizer.convert_tokens_to_ids(["a_new_additional_special_token"])
                    ),
                )
    def test_decode_single_bytes(self):
        tokenizer_list = []
        if self.test_slow_tokenizer:
            tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()))
        if self.test_rust_tokenizer:
            tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()))
        for tokenizer_class, tokenizer_utils in tokenizer_list:
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer_utils.save_pretrained(tmp_dir)
                tokenizer = tokenizer_class.from_pretrained(tmp_dir)
                self.assertTrue(tokenizer.decode([255]) == "")
    # ByT5 works on raw bytes and has no vocabulary file, so several vocab-based
    # common tests are intentionally disabled.
    def test_pretrained_model_lists(self):
        pass

    def test_pretokenized_inputs(self):
        pass

    def test_get_vocab(self):
        pass

    def test_conversion_reversible(self):
        pass
    def test_convert_tokens_to_string_format(self):
        tokenizers = self.get_tokenizers(fast=True, do_lower_case=True)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                tokens = ["t", "h", "i", "s", " ", "i", "s", " ", "a", " ", "t", "e", "x", "t", "</s>"]
                string = tokenizer.convert_tokens_to_string(tokens)
                self.assertIsInstance(string, str)
    def test_tokenizers_common_ids_setters(self):
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                attributes_list = [
                    "bos_token",
                    "eos_token",
                    "unk_token",
                    "sep_token",
                    "pad_token",
                    "cls_token",
                    "mask_token",
                ]
                token_id_to_test_setters = 0
                token_to_test_setters = tokenizer.convert_ids_to_tokens(
                    token_id_to_test_setters, skip_special_tokens=False
                )
                for attr in attributes_list:
                    setattr(tokenizer, attr + "_id", None)
                    self.assertEqual(getattr(tokenizer, attr), None)
                    self.assertEqual(getattr(tokenizer, attr + "_id"), None)
                    setattr(tokenizer, attr + "_id", token_id_to_test_setters)
                    self.assertEqual(getattr(tokenizer, attr), token_to_test_setters)
                    self.assertEqual(getattr(tokenizer, attr + "_id"), token_id_to_test_setters)
                setattr(tokenizer, "additional_special_tokens_ids", [])
                self.assertListEqual(getattr(tokenizer, "additional_special_tokens"), [])
                self.assertListEqual(getattr(tokenizer, "additional_special_tokens_ids"), [])
                setattr(tokenizer, "additional_special_tokens_ids", [token_id_to_test_setters])
                self.assertListEqual(getattr(tokenizer, "additional_special_tokens"), [token_to_test_setters])
                self.assertListEqual(getattr(tokenizer, "additional_special_tokens_ids"), [token_id_to_test_setters])
| 176
| 0
|
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_lxmert import LxmertTokenizer
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "unc-nlp/lxmert-base-uncased": "https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/vocab.txt",
    },
    "tokenizer_file": {
        "unc-nlp/lxmert-base-uncased": (
            "https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/tokenizer.json"
        ),
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "unc-nlp/lxmert-base-uncased": 512,
}

PRETRAINED_INIT_CONFIGURATION = {
    "unc-nlp/lxmert-base-uncased": {"do_lower_case": True},
}
class LxmertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = LxmertTokenizer

    def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=True, unk_token="[UNK]", sep_token="[SEP]", pad_token="[PAD]", cls_token="[CLS]", mask_token="[MASK]", tokenize_chinese_chars=True, strip_accents=None, **kwargs):
        super().__init__(
            vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents, **kwargs,
        )
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)
        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory, filename_prefix=None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
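# A minimal usage sketch (assumes the "unc-nlp/lxmert-base-uncased" checkpoint listed
# in the maps above; the question/answer strings are illustrative):
#
#     tokenizer = LxmertTokenizerFast.from_pretrained("unc-nlp/lxmert-base-uncased")
#     enc = tokenizer("who is driving?", "a man in a red shirt")
#     # enc["input_ids"] is [CLS] <question> [SEP] <answer> [SEP]; per
#     # create_token_type_ids_from_sequences above, enc["token_type_ids"] marks the
#     # first segment (including [CLS] and its [SEP]) with 0s and the second with 1s.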
| 711
|
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyInpaintPipeline, KandinskyPriorPipeline, UNet2DConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class KandinskyInpaintPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyInpaintPipeline
    params = ["prompt", "image_embeds", "negative_image_embeds", "image", "mask_image"]
    batch_params = [
        "prompt",
        "negative_prompt",
        "image_embeds",
        "negative_image_embeds",
        "image",
        "mask_image",
    ]
    required_optional_params = [
        "generator",
        "height",
        "width",
        "latents",
        "guidance_scale",
        "negative_prompt",
        "num_inference_steps",
        "return_dict",
        "guidance_scale",
        "num_images_per_prompt",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False
    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100

    @property
    def dummy_tokenizer(self):
        tokenizer = XLMRobertaTokenizerFast.from_pretrained("YiYiXu/tiny-random-mclip-base")
        return tokenizer

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = MCLIPConfig(
            numDims=self.cross_attention_dim, transformerDimensions=self.text_embedder_hidden_size, hidden_size=self.text_embedder_hidden_size, intermediate_size=37, num_attention_heads=4, num_hidden_layers=5, vocab_size=1005,
        )
        text_encoder = MultilingualCLIP(config)
        text_encoder = text_encoder.eval()
        return text_encoder
    @property
    def dummy_unet(self):
        torch.manual_seed(0)
        model_kwargs = {
            "in_channels": 9,
            # Out channels is double in channels because predicts mean and variance
            "out_channels": 8,
            "addition_embed_type": "text_image",
            "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
            "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
            "mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
            "block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2),
            "layers_per_block": 1,
            "encoder_hid_dim": self.text_embedder_hidden_size,
            "encoder_hid_dim_type": "text_image_proj",
            "cross_attention_dim": self.cross_attention_dim,
            "attention_head_dim": 4,
            "resnet_time_scale_shift": "scale_shift",
            "class_embed_type": None,
        }
        model = UNet2DConditionModel(**model_kwargs)
        return model
    @property
    def dummy_movq_kwargs(self):
        return {
            "block_out_channels": [32, 64],
            "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
            "in_channels": 3,
            "latent_channels": 4,
            "layers_per_block": 1,
            "norm_num_groups": 8,
            "norm_type": "spatial",
            "num_vq_embeddings": 12,
            "out_channels": 3,
            "up_block_types": [
                "AttnUpDecoderBlock2D",
                "UpDecoderBlock2D",
            ],
            "vq_embed_dim": 4,
        }

    @property
    def dummy_movq(self):
        torch.manual_seed(0)
        model = VQModel(**self.dummy_movq_kwargs)
        return model
    def get_dummy_components(self):
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        unet = self.dummy_unet
        movq = self.dummy_movq
        scheduler = DDIMScheduler(
            num_train_timesteps=1000, beta_schedule="linear", beta_start=0.00085, beta_end=0.012, clip_sample=False, set_alpha_to_one=False, steps_offset=1, prediction_type="epsilon", thresholding=False,
        )
        components = {
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "unet": unet,
            "scheduler": scheduler,
            "movq": movq,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        image_embeds = floats_tensor((1, self.cross_attention_dim), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.cross_attention_dim), rng=random.Random(seed + 1)).to(device)
        # create init_image
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((256, 256))
        # create mask
        mask = np.ones((64, 64), dtype=np.float32)
        mask[:32, :32] = 0
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "horse",
            "image": init_image,
            "mask_image": mask,
            "image_embeds": image_embeds,
            "negative_image_embeds": negative_image_embeds,
            "generator": generator,
            "height": 64,
            "width": 64,
            "num_inference_steps": 2,
            "guidance_scale": 4.0,
            "output_type": "np",
        }
        return inputs
    def test_kandinsky_inpaint(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        output = pipe(**self.get_dummy_inputs(device))
        image = output.images
        image_from_tuple = pipe(**self.get_dummy_inputs(device), return_dict=False)[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        print(f"image.shape {image.shape}")
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array(
            [0.8326919, 0.73790467, 0.20918581, 0.9309612, 0.5511791, 0.43713328, 0.5513321, 0.49922934, 0.59497786]
        )
        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)
@slow
@require_torch_gpu
class KandinskyInpaintPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_kandinsky_inpaint(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinsky/kandinsky_inpaint_cat_with_hat_fp16.npy"
        )
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png"
        )
        mask = np.ones((768, 768), dtype=np.float32)
        mask[:250, 250:-250] = 0
        prompt = "a hat"
        pipe_prior = KandinskyPriorPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-1-prior", torch_dtype=torch.float16
        )
        pipe_prior.to(torch_device)
        pipeline = KandinskyInpaintPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-1-inpaint", torch_dtype=torch.float16
        )
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)
        generator = torch.Generator(device="cpu").manual_seed(0)
        image_emb, zero_image_emb = pipe_prior(
            prompt, generator=generator, num_inference_steps=5, negative_prompt="",
        ).to_tuple()
        output = pipeline(
            prompt, image=init_image, mask_image=mask, image_embeds=image_emb, negative_image_embeds=zero_image_emb, generator=generator, num_inference_steps=100, height=768, width=768, output_type="np",
        )
        image = output.images[0]
        assert image.shape == (768, 768, 3)
        assert_mean_pixel_difference(image, expected_image)
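# The integration test above encodes the two-stage Kandinsky 2.1 inpainting recipe:
# KandinskyPriorPipeline turns the text prompt into CLIP image embeddings, and
# KandinskyInpaintPipeline consumes those embeddings together with the source image
# and a mask (the test zeroes the band of the mask where the hat should be painted).
# Condensed (same checkpoints and arguments as the test, nothing new):
#
#     image_emb, zero_image_emb = pipe_prior("a hat", generator=generator,
#                                            num_inference_steps=5, negative_prompt="").to_tuple()
#     image = pipeline("a hat", image=init_image, mask_image=mask,
#                      image_embeds=image_emb, negative_image_embeds=zero_image_emb,
#                      generator=generator, num_inference_steps=100,
#                      height=768, width=768, output_type="np").images[0]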
| 573
| 0
|
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

VIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/vit-base-patch16-224": "https://huggingface.co/vit-base-patch16-224/resolve/main/config.json",
    # See all ViT models at https://huggingface.co/models?filter=vit
}
class ViTConfig(PretrainedConfig):
    model_type = "vit"

    def __init__(self, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, initializer_range=0.02, layer_norm_eps=1e-12, image_size=224, patch_size=16, num_channels=3, qkv_bias=True, encoder_stride=16, **kwargs):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.encoder_stride = encoder_stride


class ViTOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
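# A small worked consequence of these defaults (an illustration, not part of the
# original file): with image_size=224 and patch_size=16, a ViT encoder sees
# (224 // 16) ** 2 = 196 patch tokens plus one [CLS] token per image.
#
#     config = ViTConfig()
#     seq_len = (config.image_size // config.patch_size) ** 2 + 1
#     assert seq_len == 197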
| 76
|
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "xlm-roberta-base": "https://huggingface.co/xlm-roberta-base/resolve/main/sentencepiece.bpe.model",
        "xlm-roberta-large": "https://huggingface.co/xlm-roberta-large/resolve/main/sentencepiece.bpe.model",
        "xlm-roberta-large-finetuned-conll02-dutch": (
            "https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/sentencepiece.bpe.model"
        ),
        "xlm-roberta-large-finetuned-conll02-spanish": (
            "https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/sentencepiece.bpe.model"
        ),
        "xlm-roberta-large-finetuned-conll03-english": (
            "https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/sentencepiece.bpe.model"
        ),
        "xlm-roberta-large-finetuned-conll03-german": (
            "https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/sentencepiece.bpe.model"
        ),
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "xlm-roberta-base": 512,
    "xlm-roberta-large": 512,
    "xlm-roberta-large-finetuned-conll02-dutch": 512,
    "xlm-roberta-large-finetuned-conll02-spanish": 512,
    "xlm-roberta-large-finetuned-conll03-english": 512,
    "xlm-roberta-large-finetuned-conll03-german": 512,
}
class XLMRobertaTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(self, vocab_file, bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", sp_model_kwargs=None, **kwargs) -> None:
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, cls_token=cls_token, pad_token=pad_token, mask_token=mask_token, sp_model_kwargs=self.sp_model_kwargs, **kwargs,
        )
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.vocab_file = vocab_file
        # Original fairseq vocab and spm vocab must be "aligned":
        # Vocab    |    0    |    1    |   2    |    3    |  4  |  5  |  6  |   7   |   8   |  9
        # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
        # fairseq  | '<s>'   | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's'   | '▁de' | '-'
        # spm      | '<unk>' | '<s>'   | '</s>' | ','     | '.' | '▁' | 's' | '▁de' | '-'   | '▁a'
        # Mimic fairseq token-to-id alignment for the first 4 token
        self.fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
        # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
        self.fairseq_offset = 1
        self.fairseq_tokens_to_ids["<mask>"] = len(self.sp_model) + self.fairseq_offset
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        state["sp_model_proto"] = self.sp_model.serialized_model_proto()
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    @property
    def vocab_size(self):
        return len(self.sp_model) + self.fairseq_offset + 1  # Add the <mask> token

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token)
        # Need to return unknown token if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id

    def _convert_id_to_token(self, index):
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset)

    def convert_tokens_to_string(self, tokens):
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string

    def save_vocabulary(self, save_directory, filename_prefix=None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (out_vocab_file,)
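# A small worked example of the fairseq/spm alignment implemented above (the model
# path is an illustrative placeholder; ids for "<s>", "<pad>", "</s>", "<unk>" are the
# ones hard-coded in __init__, every plain sentencepiece id is shifted up by
# fairseq_offset == 1, and "<mask>" lands at len(sp_model) + 1):
#
#     tok = XLMRobertaTokenizer("sentencepiece.bpe.model")
#     assert tok._convert_token_to_id("<s>") == 0
#     assert tok._convert_token_to_id("<unk>") == 3
#     piece_id = tok.sp_model.PieceToId("▁the")
#     assert tok._convert_token_to_id("▁the") == piece_id + tok.fairseq_offset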
| 76
| 1
|
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from transformers.utils import is_vision_available
from transformers.utils.generic import TensorType
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_valid_image,
to_numpy_array,
valid_images,
)
from ...utils import logging
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)


def make_batched(videos) -> List[List[ImageInput]]:
    if isinstance(videos, (list, tuple)) and isinstance(videos[0], (list, tuple)) and is_valid_image(videos[0][0]):
        return videos
    elif isinstance(videos, (list, tuple)) and is_valid_image(videos[0]):
        return [videos]
    elif is_valid_image(videos):
        return [[videos]]
    raise ValueError(f"Could not make batched video from {videos}")
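# make_batched normalizes the three accepted input layouts to List[List[image]]:
# a single image becomes one single-frame video, a list of frames becomes a batch of
# one video, and a batch of videos passes through unchanged. Illustrative shapes
# (any valid image type can stand in for the frames):
#
#     make_batched(frame)               # -> [[frame]]       1 video, 1 frame
#     make_batched([f0, f1])            # -> [[f0, f1]]      1 video, 2 frames
#     make_batched([[f0, f1], [f2]])    # -> unchanged       2 videos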
class VideoImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(self, do_resize: bool = True, size: Dict[str, int] = None, resample: PILImageResampling = PILImageResampling.BILINEAR, do_center_crop: bool = True, crop_size: Dict[str, int] = None, do_rescale: bool = True, rescale_factor: Union[int, float] = 1 / 255, offset: bool = True, do_normalize: bool = True, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, **kwargs) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 256}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.offset = offset
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
    def resize(self, image: np.ndarray, size: Dict[str, int], resample: PILImageResampling = PILImageResampling.BILINEAR, data_format=None, **kwargs) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" in size:
            output_size = get_resize_output_image_size(image, size["shortest_edge"], default_to_square=False)
        elif "height" in size and "width" in size:
            output_size = (size["height"], size["width"])
        else:
            raise ValueError(f"Size must have 'height' and 'width' or 'shortest_edge' as keys. Got {size.keys()}")
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(self, image: np.ndarray, size: Dict[str, int], data_format=None, **kwargs) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"Size must have 'height' and 'width' as keys. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(self, image: np.ndarray, scale, offset: bool = True, data_format=None, **kwargs):
        image = image.astype(np.float32)
        if offset:
            image = image - (scale / 2)
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image: np.ndarray, mean, std, data_format=None, **kwargs) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
    def _preprocess_image(self, image: ImageInput, do_resize: bool = None, size: Dict[str, int] = None, resample: PILImageResampling = None, do_center_crop: bool = None, crop_size: Dict[str, int] = None, do_rescale: bool = None, rescale_factor: float = None, offset: bool = None, do_normalize: bool = None, image_mean=None, image_std=None, data_format: ChannelDimension = ChannelDimension.FIRST) -> np.ndarray:
        if do_resize and size is None or resample is None:
            raise ValueError("Size and resample must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")
        if offset and not do_rescale:
            raise ValueError("For offset, do_rescale must also be set to True.")
        # All transformations expect numpy arrays.
        image = to_numpy_array(image)
        if do_resize:
            image = self.resize(image=image, size=size, resample=resample)
        if do_center_crop:
            image = self.center_crop(image, size=crop_size)
        if do_rescale:
            image = self.rescale(image=image, scale=rescale_factor, offset=offset)
        if do_normalize:
            image = self.normalize(image=image, mean=image_mean, std=image_std)
        image = to_channel_dimension_format(image, data_format)
        return image
    def preprocess(self, videos: ImageInput, do_resize: bool = None, size: Dict[str, int] = None, resample: PILImageResampling = None, do_center_crop: bool = None, crop_size: Dict[str, int] = None, do_rescale: bool = None, rescale_factor: float = None, offset: bool = None, do_normalize: bool = None, image_mean=None, image_std=None, return_tensors: Optional[Union[str, TensorType]] = None, data_format: ChannelDimension = ChannelDimension.FIRST, **kwargs) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        offset = offset if offset is not None else self.offset
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        if not valid_images(videos):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )
        videos = make_batched(videos)
        videos = [
            [
                self._preprocess_image(
                    image=img, do_resize=do_resize, size=size, resample=resample, do_center_crop=do_center_crop, crop_size=crop_size, do_rescale=do_rescale, rescale_factor=rescale_factor, offset=offset, do_normalize=do_normalize, image_mean=image_mean, image_std=image_std, data_format=data_format,
                )
                for img in video
            ]
            for video in videos
        ]
        data = {"pixel_values": videos}
        return BatchFeature(data=data, tensor_type=return_tensors)
| 270
|
'''simple docstring'''
from dataclasses import dataclass, field
from typing import Tuple
from ..utils import cached_property, is_torch_available, is_torch_tpu_available, logging, requires_backends
from .benchmark_args_utils import BenchmarkArguments
if is_torch_available():
import torch
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
logger = logging.get_logger(__name__)
@dataclass
class PyTorchBenchmarkArguments(BenchmarkArguments):
    deprecated_args = [
        "no_inference",
        "no_cuda",
        "no_tpu",
        "no_speed",
        "no_memory",
        "no_env_print",
        "no_multi_process",
    ]

    def __init__(self, **kwargs):
        # Map deprecated `no_*` flags onto their positive counterparts before the
        # parent dataclass consumes the remaining kwargs.
        for deprecated_arg in self.deprecated_args:
            if deprecated_arg in kwargs:
                positive_arg = deprecated_arg[3:]
                setattr(self, positive_arg, not kwargs.pop(deprecated_arg))
                logger.warning(
                    f"{deprecated_arg} is depreciated. Please use --no_{positive_arg} or"
                    f" {positive_arg}={kwargs[positive_arg]}"
                )
        self.torchscript = kwargs.pop("torchscript", self.torchscript)
        self.torch_xla_tpu_print_metrics = kwargs.pop("torch_xla_tpu_print_metrics", self.torch_xla_tpu_print_metrics)
        self.fp16_opt_level = kwargs.pop("fp16_opt_level", self.fp16_opt_level)
        super().__init__(**kwargs)
    torchscript: bool = field(default=False, metadata={"help": "Trace the models using torchscript"})
    torch_xla_tpu_print_metrics: bool = field(default=False, metadata={"help": "Print Xla/PyTorch tpu metrics"})
    fp16_opt_level: str = field(
        default="O1",
        metadata={
            "help": (
                "For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']. "
                "See details at https://nvidia.github.io/apex/amp.html"
            )
        },
    )
    @cached_property
    def _setup_devices(self) -> Tuple["torch.device", int]:
        requires_backends(self, ["torch"])
        logger.info("PyTorch: setting up devices")
        if not self.cuda:
            device = torch.device("cpu")
            n_gpu = 0
        elif is_torch_tpu_available():
            device = xm.xla_device()
            n_gpu = 0
        else:
            device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
            n_gpu = torch.cuda.device_count()
        return device, n_gpu
@property
def __UpperCamelCase ( self ) ->Optional[Any]:
'''simple docstring'''
return is_torch_tpu_available() and self.tpu
@property
def __UpperCamelCase ( self ) ->int:
'''simple docstring'''
requires_backends(self , ['torch'] )
# TODO(PVP): currently only single GPU is supported
return torch.cuda.current_device()
@property
def __UpperCamelCase ( self ) ->"torch.device":
'''simple docstring'''
requires_backends(self , ['torch'] )
return self._setup_devices[0]
@property
def __UpperCamelCase ( self ) ->List[Any]:
'''simple docstring'''
requires_backends(self , ['torch'] )
return self._setup_devices[1]
@property
def __UpperCamelCase ( self ) ->List[str]:
'''simple docstring'''
return self.n_gpu > 0
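# A minimal, self-contained sketch of the deprecated-flag pattern the class
# above implements: a legacy "no_<name>" kwarg is popped and stored as the
# inverted positive attribute, so no_cuda=True becomes cuda=False. The class
# and flag names below are illustrative, not part of the original file.
class _FlagHolder:
    deprecated_args = ["no_cuda", "no_tpu"]

    def __init__(self, **kwargs):
        for deprecated_arg in self.deprecated_args:
            if deprecated_arg in kwargs:
                positive_arg = deprecated_arg[3:]  # strip the "no_" prefix
                setattr(self, positive_arg, not kwargs.pop(deprecated_arg))


_holder = _FlagHolder(no_cuda=True)
assert _holder.cuda is False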
| 270
| 1
|
import argparse
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_controlnet_from_original_ckpt
if __name__ == "__main__":
_lowercase : List[Any] =argparse.ArgumentParser()
parser.add_argument(
"--checkpoint_path", default=None, type=str, required=True, help="Path to the checkpoint to convert."
)
parser.add_argument(
"--original_config_file",
type=str,
required=True,
help="The YAML config file corresponding to the original architecture.",
)
parser.add_argument(
"--num_in_channels",
default=None,
type=int,
help="The number of input channels. If `None` number of input channels will be automatically inferred.",
)
parser.add_argument(
"--image_size",
default=512,
type=int,
help=(
"The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Siffusion v2"
" Base. Use 768 for Stable Diffusion v2."
),
)
parser.add_argument(
"--extract_ema",
action="store_true",
help=(
"Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights"
" or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield"
" higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning."
),
)
parser.add_argument(
"--upcast_attention",
action="store_true",
help=(
"Whether the attention computation should always be upcasted. This is necessary when running stable"
" diffusion 2.1."
),
)
parser.add_argument(
"--from_safetensors",
action="store_true",
help="If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.",
)
parser.add_argument(
"--to_safetensors",
action="store_true",
help="Whether to store pipeline in safetensors format or not.",
)
parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.")
parser.add_argument("--device", type=str, help="Device to use (e.g. cpu, cuda:0, cuda:1, etc.)")
def lowerCAmelCase_ ( _lowercase : Optional[Any]) -> int:
"""simple docstring"""
if string == "True":
return True
elif string == "False":
return False
else:
raise ValueError(F'''could not parse string as bool {string}''')
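# How parse_bool behaves when passed as the argparse `type=` callable below:
# only the exact strings "True" and "False" are accepted; anything else
# raises ValueError, which argparse reports as an invalid argument value.
#   parse_bool("True")  -> True
#   parse_bool("False") -> False
#   parse_bool("yes")   -> ValueError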
parser.add_argument(
"--use_linear_projection", help="Override for use linear projection", required=False, type=parse_bool
)
parser.add_argument("--cross_attention_dim", help="Override for cross attention_dim", required=False, type=int)
_lowercase : Dict =parser.parse_args()
_lowercase : Optional[Any] =download_controlnet_from_original_ckpt(
checkpoint_path=args.checkpoint_path,
original_config_file=args.original_config_file,
image_size=args.image_size,
extract_ema=args.extract_ema,
num_in_channels=args.num_in_channels,
upcast_attention=args.upcast_attention,
from_safetensors=args.from_safetensors,
device=args.device,
use_linear_projection=args.use_linear_projection,
cross_attention_dim=args.cross_attention_dim,
)
controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
| 136
|
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__snake_case = logging.get_logger(__name__)
__snake_case = {
"""camembert-base""": """https://huggingface.co/camembert-base/resolve/main/config.json""",
"""umberto-commoncrawl-cased-v1""": (
"""https://huggingface.co/Musixmatch/umberto-commoncrawl-cased-v1/resolve/main/config.json"""
),
"""umberto-wikipedia-uncased-v1""": (
"""https://huggingface.co/Musixmatch/umberto-wikipedia-uncased-v1/resolve/main/config.json"""
),
}
class _a ( __a ):
"""simple docstring"""
A_ = '''camembert'''
def __init__( self : Union[str, Any] , lowercase_ : Union[str, Any]=30_522 , lowercase_ : Optional[Any]=768 , lowercase_ : Tuple=12 , lowercase_ : Dict=12 , lowercase_ : Tuple=3_072 , lowercase_ : Optional[Any]="gelu" , lowercase_ : Optional[int]=0.1 , lowercase_ : Optional[int]=0.1 , lowercase_ : List[Any]=512 , lowercase_ : Optional[int]=2 , lowercase_ : str=0.0_2 , lowercase_ : int=1e-12 , lowercase_ : str=1 , lowercase_ : List[str]=0 , lowercase_ : int=2 , lowercase_ : Union[str, Any]="absolute" , lowercase_ : Optional[Any]=True , lowercase_ : List[Any]=None , **lowercase_ : int , ):
'''simple docstring'''
super().__init__(pad_token_id=lowercase_ , bos_token_id=lowercase_ , eos_token_id=lowercase_ , **lowercase_ )
lowercase_ = vocab_size
lowercase_ = hidden_size
lowercase_ = num_hidden_layers
lowercase_ = num_attention_heads
lowercase_ = hidden_act
lowercase_ = intermediate_size
lowercase_ = hidden_dropout_prob
lowercase_ = attention_probs_dropout_prob
lowercase_ = max_position_embeddings
lowercase_ = type_vocab_size
lowercase_ = initializer_range
lowercase_ = layer_norm_eps
lowercase_ = position_embedding_type
lowercase_ = use_cache
lowercase_ = classifier_dropout
class _a ( __a ):
"""simple docstring"""
@property
def lowerCamelCase__ ( self : Dict ):
'''simple docstring'''
if self.task == "multiple-choice":
lowercase_ = {0: """batch""", 1: """choice""", 2: """sequence"""}
else:
lowercase_ = {0: """batch""", 1: """sequence"""}
return OrderedDict(
[
("""input_ids""", dynamic_axis),
("""attention_mask""", dynamic_axis),
] )
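# A short usage sketch for the configuration above via its public transformers
# name, CamembertConfig (assuming transformers is installed). The ONNX
# `inputs` property simply maps each model input to its dynamic axes.
from transformers import CamembertConfig

_config = CamembertConfig()  # defaults match the signature above
assert _config.model_type == "camembert"
assert _config.hidden_size == 768
_dynamic_axis = {0: "batch", 1: "sequence"}  # non-multiple-choice case
_onnx_inputs = {"input_ids": _dynamic_axis, "attention_mask": _dynamic_axis}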
| 451
| 0
|
import argparse
import re
import torch
from CLAP import create_model
from transformers import AutoFeatureExtractor, ClapConfig, ClapModel
SCREAMING_SNAKE_CASE__ = {
'''text_branch''': '''text_model''',
'''audio_branch''': '''audio_model.audio_encoder''',
'''attn''': '''attention.self''',
'''self.proj''': '''output.dense''',
'''attention.self_mask''': '''attn_mask''',
'''mlp.fc1''': '''intermediate.dense''',
'''mlp.fc2''': '''output.dense''',
'''norm1''': '''layernorm_before''',
'''norm2''': '''layernorm_after''',
'''bn0''': '''batch_norm''',
}
SCREAMING_SNAKE_CASE__ = AutoFeatureExtractor.from_pretrained('''laion/clap-htsat-unfused''', truncation='''rand_trunc''')
def A ( __UpperCamelCase , __UpperCamelCase=False ) -> Tuple:
A__ , A__ = create_model(
'HTSAT-tiny' , 'roberta' , __UpperCamelCase , precision='fp32' , device='cuda:0' if torch.cuda.is_available() else 'cpu' , enable_fusion=__UpperCamelCase , fusion_type='aff_2d' if enable_fusion else None , )
return model, model_cfg
def A ( __UpperCamelCase ) -> Any:
A__ = {}
A__ = r'.*sequential.(\d+).*'
A__ = r'.*_projection.(\d+).*'
for key, value in state_dict.items():
# check if any key needs to be modified
for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
if key_to_modify in key:
A__ = key.replace(__UpperCamelCase , __UpperCamelCase )
if re.match(__UpperCamelCase , __UpperCamelCase ):
# replace sequential layers with list
A__ = re.match(__UpperCamelCase , __UpperCamelCase ).group(1 )
A__ = key.replace(f'''sequential.{sequential_layer}.''' , f'''layers.{int(__UpperCamelCase )//3}.linear.''' )
elif re.match(__UpperCamelCase , __UpperCamelCase ):
A__ = int(re.match(__UpperCamelCase , __UpperCamelCase ).group(1 ) )
# Because in CLAP they use `nn.Sequential`...
A__ = 1 if projecton_layer == 0 else 2
A__ = key.replace(f'''_projection.{projecton_layer}.''' , f'''_projection.linear{transformers_projection_layer}.''' )
if "audio" and "qkv" in key:
# split qkv into query key and value
A__ = value
A__ = mixed_qkv.size(0 ) // 3
A__ = mixed_qkv[:qkv_dim]
A__ = mixed_qkv[qkv_dim : qkv_dim * 2]
A__ = mixed_qkv[qkv_dim * 2 :]
A__ = query_layer
A__ = key_layer
A__ = value_layer
else:
A__ = value
return model_state_dict
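# A standalone sketch of the qkv split performed above: a fused projection of
# shape (3 * dim, ...) is cut into equal query/key/value thirds. The sizes
# below are illustrative.
import torch

_dim = 4
_mixed_qkv = torch.arange(3 * _dim * 2, dtype=torch.float32).reshape(3 * _dim, 2)
_query = _mixed_qkv[:_dim]
_key = _mixed_qkv[_dim : _dim * 2]
_value = _mixed_qkv[_dim * 2 :]
assert _query.shape == _key.shape == _value.shape == (_dim, 2)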
def A ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase=False ) -> Union[str, Any]:
A__ , A__ = init_clap(__UpperCamelCase , enable_fusion=__UpperCamelCase )
clap_model.eval()
A__ = clap_model.state_dict()
A__ = rename_state_dict(__UpperCamelCase )
A__ = ClapConfig()
A__ = enable_fusion
A__ = ClapModel(__UpperCamelCase )
# ignore the spectrogram embedding layer
model.load_state_dict(__UpperCamelCase , strict=__UpperCamelCase )
model.save_pretrained(__UpperCamelCase )
transformers_config.save_pretrained(__UpperCamelCase )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ = argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
parser.add_argument('''--enable_fusion''', action='''store_true''', help='''Whether to enable fusion or not''')
SCREAMING_SNAKE_CASE__ = parser.parse_args()
convert_clap_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.enable_fusion)
| 52
|
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import YolosConfig, YolosForObjectDetection, YolosImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
def A ( __UpperCamelCase ) -> YolosConfig:
A__ = YolosConfig()
# size of the architecture
if "yolos_ti" in yolos_name:
A__ = 192
A__ = 768
A__ = 12
A__ = 3
A__ = [800, 1_333]
A__ = False
elif yolos_name == "yolos_s_dWr":
A__ = 330
A__ = 14
A__ = 6
A__ = 1_320
elif "yolos_s" in yolos_name:
A__ = 384
A__ = 1_536
A__ = 12
A__ = 6
elif "yolos_b" in yolos_name:
A__ = [800, 1_344]
A__ = 91
A__ = 'huggingface/label-files'
A__ = 'coco-detection-id2label.json'
A__ = json.load(open(hf_hub_download(__UpperCamelCase , __UpperCamelCase , repo_type='dataset' ) , 'r' ) )
A__ = {int(__UpperCamelCase ): v for k, v in idalabel.items()}
A__ = idalabel
A__ = {v: k for k, v in idalabel.items()}
return config
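# The label-mapping step above, reduced to a sketch that skips the hub
# download: JSON keys arrive as strings, so they are cast back to int and the
# inverse mapping is derived from the result.
id2label_raw = {"0": "person", "1": "bicycle"}  # stand-in for the downloaded file
id2label = {int(k): v for k, v in id2label_raw.items()}
label2id = {v: k for k, v in id2label.items()}
assert label2id["person"] == 0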
def A ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = False ) -> str:
for i in range(config.num_hidden_layers ):
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
A__ = state_dict.pop(f'''blocks.{i}.attn.qkv.weight''' )
A__ = state_dict.pop(f'''blocks.{i}.attn.qkv.bias''' )
# next, add query, keys and values (in that order) to the state dict
A__ = in_proj_weight[: config.hidden_size, :]
A__ = in_proj_bias[: config.hidden_size]
A__ = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
A__ = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
A__ = in_proj_weight[-config.hidden_size :, :]
A__ = in_proj_bias[-config.hidden_size :]
def A ( __UpperCamelCase ) -> str:
if "backbone" in name:
A__ = name.replace('backbone' , 'vit' )
if "cls_token" in name:
A__ = name.replace('cls_token' , 'embeddings.cls_token' )
if "det_token" in name:
A__ = name.replace('det_token' , 'embeddings.detection_tokens' )
if "mid_pos_embed" in name:
A__ = name.replace('mid_pos_embed' , 'encoder.mid_position_embeddings' )
if "pos_embed" in name:
A__ = name.replace('pos_embed' , 'embeddings.position_embeddings' )
if "patch_embed.proj" in name:
A__ = name.replace('patch_embed.proj' , 'embeddings.patch_embeddings.projection' )
if "blocks" in name:
A__ = name.replace('blocks' , 'encoder.layer' )
if "attn.proj" in name:
A__ = name.replace('attn.proj' , 'attention.output.dense' )
if "attn" in name:
A__ = name.replace('attn' , 'attention.self' )
if "norm1" in name:
A__ = name.replace('norm1' , 'layernorm_before' )
if "norm2" in name:
A__ = name.replace('norm2' , 'layernorm_after' )
if "mlp.fc1" in name:
A__ = name.replace('mlp.fc1' , 'intermediate.dense' )
if "mlp.fc2" in name:
A__ = name.replace('mlp.fc2' , 'output.dense' )
if "class_embed" in name:
A__ = name.replace('class_embed' , 'class_labels_classifier' )
if "bbox_embed" in name:
A__ = name.replace('bbox_embed' , 'bbox_predictor' )
if "vit.norm" in name:
A__ = name.replace('vit.norm' , 'vit.layernorm' )
return name
def A ( __UpperCamelCase , __UpperCamelCase ) -> dict:
for key in orig_state_dict.copy().keys():
A__ = orig_state_dict.pop(__UpperCamelCase )
if "qkv" in key:
A__ = key.split('.' )
A__ = int(key_split[2] )
A__ = model.vit.encoder.layer[layer_num].attention.attention.all_head_size
if "weight" in key:
A__ = val[:dim, :]
A__ = val[
dim : dim * 2, :
]
A__ = val[-dim:, :]
else:
A__ = val[:dim]
A__ = val[dim : dim * 2]
A__ = val[-dim:]
else:
A__ = val
return orig_state_dict
def A ( ) -> torch.Tensor:
A__ = 'http://images.cocodataset.org/val2017/000000039769.jpg'
A__ = Image.open(requests.get(__UpperCamelCase , stream=__UpperCamelCase ).raw )
return im
@torch.no_grad()
def A ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = False ) -> List[str]:
A__ = get_yolos_config(__UpperCamelCase )
# load original state_dict
A__ = torch.load(__UpperCamelCase , map_location='cpu' )['model']
# load 🤗 model
A__ = YolosForObjectDetection(__UpperCamelCase )
model.eval()
A__ = convert_state_dict(__UpperCamelCase , __UpperCamelCase )
model.load_state_dict(__UpperCamelCase )
# Check outputs on an image, prepared by YolosImageProcessor
A__ = 800 if yolos_name != 'yolos_ti' else 512
A__ = YolosImageProcessor(format='coco_detection' , size=__UpperCamelCase )
A__ = image_processor(images=prepare_img() , return_tensors='pt' )
A__ = model(**__UpperCamelCase )
A__ , A__ = outputs.logits, outputs.pred_boxes
A__ , A__ = None, None
if yolos_name == "yolos_ti":
A__ = torch.tensor(
[[-39.5022, -11.9820, -17.6888], [-29.9574, -9.9769, -17.7691], [-42.3281, -20.7200, -30.6294]] )
A__ = torch.tensor(
[[0.4021, 0.0836, 0.7979], [0.0184, 0.2609, 0.0364], [0.1781, 0.2004, 0.2095]] )
elif yolos_name == "yolos_s_200_pre":
A__ = torch.tensor(
[[-24.0248, -10.3024, -14.8290], [-42.0392, -16.8200, -27.4334], [-27.2743, -11.8154, -18.7148]] )
A__ = torch.tensor(
[[0.2559, 0.5455, 0.4706], [0.2989, 0.7279, 0.1875], [0.7732, 0.4017, 0.4462]] )
elif yolos_name == "yolos_s_300_pre":
A__ = torch.tensor(
[[-36.2220, -14.4385, -23.5457], [-35.6970, -14.7583, -21.3935], [-31.5939, -13.6042, -16.8049]] )
A__ = torch.tensor(
[[0.7614, 0.2316, 0.4728], [0.7168, 0.4495, 0.3855], [0.4996, 0.1466, 0.9996]] )
elif yolos_name == "yolos_s_dWr":
A__ = torch.tensor(
[[-42.8668, -24.1049, -41.1690], [-34.7456, -14.1274, -24.9194], [-33.7898, -12.1946, -25.6495]] )
A__ = torch.tensor(
[[0.5587, 0.2773, 0.0605], [0.5004, 0.3014, 0.9994], [0.4999, 0.1548, 0.9994]] )
elif yolos_name == "yolos_base":
A__ = torch.tensor(
[[-40.6064, -24.3084, -32.6447], [-55.1990, -30.7719, -35.5877], [-51.4311, -33.3507, -35.6462]] )
A__ = torch.tensor(
[[0.5555, 0.2794, 0.0655], [0.9049, 0.2664, 0.1894], [0.9183, 0.1984, 0.1635]] )
else:
raise ValueError(f'''Unknown yolos_name: {yolos_name}''' )
assert torch.allclose(logits[0, :3, :3] , __UpperCamelCase , atol=1E-4 )
assert torch.allclose(pred_boxes[0, :3, :3] , __UpperCamelCase , atol=1E-4 )
Path(__UpperCamelCase ).mkdir(exist_ok=__UpperCamelCase )
print(f'''Saving model {yolos_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(__UpperCamelCase )
print(f'''Saving image processor to {pytorch_dump_folder_path}''' )
image_processor.save_pretrained(__UpperCamelCase )
if push_to_hub:
A__ = {
'yolos_ti': 'yolos-tiny',
'yolos_s_200_pre': 'yolos-small',
'yolos_s_300_pre': 'yolos-small-300',
'yolos_s_dWr': 'yolos-small-dwr',
'yolos_base': 'yolos-base',
}
print('Pushing to the hub...' )
A__ = model_mapping[yolos_name]
image_processor.push_to_hub(__UpperCamelCase , organization='hustvl' )
model.push_to_hub(__UpperCamelCase , organization='hustvl' )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--yolos_name''',
default='''yolos_s_200_pre''',
type=str,
help=(
'''Name of the YOLOS model you\'d like to convert. Should be one of \'yolos_ti\', \'yolos_s_200_pre\','''
''' \'yolos_s_300_pre\', \'yolos_s_dWr\', \'yolos_base\'.'''
),
)
parser.add_argument(
'''--checkpoint_path''', default=None, type=str, help='''Path to the original state dict (.pth file).'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
SCREAMING_SNAKE_CASE__ = parser.parse_args()
convert_yolos_checkpoint(args.yolos_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
| 52
| 1
|
import gc
import unittest
import numpy as np
import torch
from diffusers import AutoencoderKL, DDIMScheduler, DiTPipeline, DPMSolverMultistepScheduler, TransformeraDModel
from diffusers.utils import is_xformers_available, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS,
CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class _snake_case ( snake_case , unittest.TestCase ):
UpperCamelCase__ = DiTPipeline
UpperCamelCase__ = CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS
UpperCamelCase__ = PipelineTesterMixin.required_optional_params - {
'latents',
'num_images_per_prompt',
'callback',
'callback_steps',
}
UpperCamelCase__ = CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS
UpperCamelCase__ = False
def SCREAMING_SNAKE_CASE ( self ):
torch.manual_seed(0 )
__magic_name__ : Tuple = TransformeraDModel(
sample_size=16 , num_layers=2 , patch_size=4 , attention_head_dim=8 , num_attention_heads=2 , in_channels=4 , out_channels=8 , attention_bias=_a , activation_fn="gelu-approximate" , num_embeds_ada_norm=1_000 , norm_type="ada_norm_zero" , norm_elementwise_affine=_a , )
__magic_name__ : int = AutoencoderKL()
__magic_name__ : str = DDIMScheduler()
__magic_name__ : int = {"transformer": transformer.eval(), "vae": vae.eval(), "scheduler": scheduler}
return components
def SCREAMING_SNAKE_CASE ( self , _a , _a=0 ):
if str(_a ).startswith("mps" ):
__magic_name__ : str = torch.manual_seed(_a )
else:
__magic_name__ : Optional[Any] = torch.Generator(device=_a ).manual_seed(_a )
__magic_name__ : Dict = {
"class_labels": [1],
"generator": generator,
"num_inference_steps": 2,
"output_type": "numpy",
}
return inputs
def SCREAMING_SNAKE_CASE ( self ):
__magic_name__ : Optional[Any] = "cpu"
__magic_name__ : Optional[int] = self.get_dummy_components()
__magic_name__ : Any = self.pipeline_class(**_a )
pipe.to(_a )
pipe.set_progress_bar_config(disable=_a )
__magic_name__ : Tuple = self.get_dummy_inputs(_a )
__magic_name__ : Any = pipe(**_a ).images
__magic_name__ : List[str] = image[0, -3:, -3:, -1]
self.assertEqual(image.shape , (1, 16, 16, 3) )
__magic_name__ : List[Any] = np.array([0.29_46, 0.66_01, 0.43_29, 0.32_96, 0.41_44, 0.53_19, 0.72_73, 0.50_13, 0.44_57] )
__magic_name__ : Union[str, Any] = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(_a , 1e-3 )
def SCREAMING_SNAKE_CASE ( self ):
self._test_inference_batch_single_identical(relax_max_difference=_a , expected_max_diff=1e-3 )
@unittest.skipIf(
torch_device != "cuda" or not is_xformers_available() , reason="XFormers attention is only available with CUDA and `xformers` installed" , )
def SCREAMING_SNAKE_CASE ( self ):
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3 )
@require_torch_gpu
@slow
class _snake_case ( unittest.TestCase ):
def SCREAMING_SNAKE_CASE ( self ):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def SCREAMING_SNAKE_CASE ( self ):
__magic_name__ : Optional[Any] = torch.manual_seed(0 )
__magic_name__ : Optional[int] = DiTPipeline.from_pretrained("facebook/DiT-XL-2-256" )
pipe.to("cuda" )
__magic_name__ : int = ["vase", "umbrella", "white shark", "white wolf"]
__magic_name__ : str = pipe.get_label_ids(_a )
__magic_name__ : Dict = pipe(_a , generator=_a , num_inference_steps=40 , output_type="np" ).images
for word, image in zip(_a , _a ):
__magic_name__ : int = load_numpy(
f'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/dit/{word}.npy''' )
assert np.abs((expected_image - image).max() ) < 1e-2
def SCREAMING_SNAKE_CASE ( self ):
__magic_name__ : str = DiTPipeline.from_pretrained("facebook/DiT-XL-2-512" )
__magic_name__ : Dict = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
pipe.to("cuda" )
__magic_name__ : List[str] = ["vase", "umbrella"]
__magic_name__ : Any = pipe.get_label_ids(_a )
__magic_name__ : List[str] = torch.manual_seed(0 )
__magic_name__ : Optional[Any] = pipe(_a , generator=_a , num_inference_steps=25 , output_type="np" ).images
for word, image in zip(_a , _a ):
__magic_name__ : Any = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
f'''/dit/{word}_512.npy''' )
assert np.abs((expected_image - image).max() ) < 1e-1
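# The seeding pattern in get_dummy_inputs above, stated once: on CUDA/CPU a
# device-local torch.Generator(device=...).manual_seed(seed) gives
# reproducible sampling, while MPS does not support device generators here
# and falls back to the global torch.manual_seed(seed).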
| 124
|
def jaro_winkler(stra: str, strb: str) -> float:
    """Compute the Jaro-Winkler similarity of two strings."""

    def get_matched_characters(_stra: str, _strb: str) -> str:
        matched = []
        limit = min(len(_stra), len(_strb)) // 2
        for i, l in enumerate(_stra):
            # only characters within +/- limit of position i can match
            left = int(max(0, i - limit))
            right = int(min(i + limit + 1, len(_strb)))
            if l in _strb[left:right]:
                matched.append(l)
                # blank out the matched character so it is not reused
                _strb = f"{_strb[0:_strb.index(l)]} {_strb[_strb.index(l) + 1:]}"
        return "".join(matched)

    # matching characters
    matching_a = get_matched_characters(stra, strb)
    matching_b = get_matched_characters(strb, stra)
    match_count = len(matching_a)

    # transposition
    transpositions = (
        len([(ca, cb) for ca, cb in zip(matching_a, matching_b) if ca != cb]) // 2
    )

    if not match_count:
        jaro = 0.0
    else:
        jaro = (
            1
            / 3
            * (
                match_count / len(stra)
                + match_count / len(strb)
                + (match_count - transpositions) / match_count
            )
        )

    # common prefix up to 4 characters
    prefix_len = 0
    for ca, cb in zip(stra[:4], strb[:4]):
        if ca == cb:
            prefix_len += 1
        else:
            break

    return jaro + 0.1 * prefix_len * (1 - jaro)
if __name__ == "__main__":
import doctest
doctest.testmod()
print(jaro_winkler("hello", "world"))
| 124
| 1
|
'''simple docstring'''
def exchange_sort(numbers: list[int]) -> list[int]:
    """Sort a list of integers in place via pairwise exchanges and return it."""
    numbers_length = len(numbers)
    for i in range(numbers_length):
        for j in range(i + 1, numbers_length):
            if numbers[j] < numbers[i]:
                numbers[j], numbers[i] = numbers[i], numbers[j]
    return numbers


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(exchange_sort(unsorted))
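# A quick non-interactive check of exchange_sort (it sorts in place and also
# returns the list):
assert exchange_sort([5, 4, 3, 2, 1]) == [1, 2, 3, 4, 5]
assert exchange_sort([-2, 0, -1]) == [-2, -1, 0]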
| 273
|
'''simple docstring'''
from __future__ import annotations
import sys
from collections import deque
from typing import Generic, TypeVar
A : str = TypeVar('T')
class LRUCache(Generic[T]):
    dq_store: deque[T]  # Cache store of keys, most recently used on the left
    key_reference: set[T]  # References of the keys currently in the cache
    _MAX_CAPACITY: int = 10  # Maximum capacity of cache

    def __init__(self, n: int):
        self.dq_store = deque()
        self.key_reference = set()
        if not n:
            LRUCache._MAX_CAPACITY = sys.maxsize
        elif n < 0:
            raise ValueError('n should be an integer greater than 0.')
        else:
            LRUCache._MAX_CAPACITY = n

    def refer(self, x: T) -> None:
        if x not in self.key_reference:
            if len(self.dq_store) == LRUCache._MAX_CAPACITY:
                last_element = self.dq_store.pop()
                self.key_reference.remove(last_element)
        else:
            self.dq_store.remove(x)
        self.dq_store.appendleft(x)
        self.key_reference.add(x)

    def display(self) -> None:
        for k in self.dq_store:
            print(k)

    def __repr__(self) -> str:
        return f'LRUCache({self._MAX_CAPACITY}) => {list(self.dq_store)}'


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    lru_cache: LRUCache[str | int] = LRUCache(4)
    lru_cache.refer('A')
    lru_cache.refer(2)
    lru_cache.refer(3)
    lru_cache.refer('A')
    lru_cache.refer(4)
    lru_cache.refer(5)
    lru_cache.display()
    print(lru_cache)
    assert str(lru_cache) == "LRUCache(4) => [5, 4, 'A', 3]"
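# Trace of the refer() calls above with capacity 4 (most recently used on the
# left), showing why the final assertion holds:
#   refer('A') -> ['A']
#   refer(2)   -> [2, 'A']
#   refer(3)   -> [3, 2, 'A']
#   refer('A') -> ['A', 3, 2]      # existing key is moved to the front
#   refer(4)   -> [4, 'A', 3, 2]
#   refer(5)   -> [5, 4, 'A', 3]   # cache full: least-recent key 2 is evicted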
| 273
| 1
|
import copy
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
UpperCamelCase = logging.get_logger(__name__)
UpperCamelCase = {
"microsoft/conditional-detr-resnet-50": (
"https://huggingface.co/microsoft/conditional-detr-resnet-50/resolve/main/config.json"
),
}
class lowerCAmelCase_ ( __snake_case ):
_UpperCamelCase : List[str] = """conditional_detr"""
_UpperCamelCase : Optional[int] = ["""past_key_values"""]
_UpperCamelCase : Union[str, Any] = {
"""hidden_size""": """d_model""",
"""num_attention_heads""": """encoder_attention_heads""",
}
def __init__( self , _lowerCAmelCase=True , _lowerCAmelCase=None , _lowerCAmelCase=3 , _lowerCAmelCase=3_0_0 , _lowerCAmelCase=6 , _lowerCAmelCase=2_0_4_8 , _lowerCAmelCase=8 , _lowerCAmelCase=6 , _lowerCAmelCase=2_0_4_8 , _lowerCAmelCase=8 , _lowerCAmelCase=0.0 , _lowerCAmelCase=0.0 , _lowerCAmelCase=True , _lowerCAmelCase="relu" , _lowerCAmelCase=2_5_6 , _lowerCAmelCase=0.1 , _lowerCAmelCase=0.0 , _lowerCAmelCase=0.0 , _lowerCAmelCase=0.02 , _lowerCAmelCase=1.0 , _lowerCAmelCase=False , _lowerCAmelCase="sine" , _lowerCAmelCase="resnet50" , _lowerCAmelCase=True , _lowerCAmelCase=False , _lowerCAmelCase=2 , _lowerCAmelCase=5 , _lowerCAmelCase=2 , _lowerCAmelCase=1 , _lowerCAmelCase=1 , _lowerCAmelCase=2 , _lowerCAmelCase=5 , _lowerCAmelCase=2 , _lowerCAmelCase=0.25 , **_lowerCAmelCase , ):
if backbone_config is not None and use_timm_backbone:
raise ValueError('You can\'t specify both `backbone_config` and `use_timm_backbone`.' )
if not use_timm_backbone:
if backbone_config is None:
logger.info('`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.' )
_lowercase : int = CONFIG_MAPPING["resnet"](out_features=['stage4'] )
elif isinstance(_lowerCAmelCase , _lowerCAmelCase ):
_lowercase : List[str] = backbone_config.get('model_type' )
_lowercase : List[Any] = CONFIG_MAPPING[backbone_model_type]
_lowercase : Union[str, Any] = config_class.from_dict(_lowerCAmelCase )
_lowercase : str = use_timm_backbone
_lowercase : List[str] = backbone_config
_lowercase : Union[str, Any] = num_channels
_lowercase : Any = num_queries
_lowercase : Optional[int] = d_model
_lowercase : Union[str, Any] = encoder_ffn_dim
_lowercase : Tuple = encoder_layers
_lowercase : List[str] = encoder_attention_heads
_lowercase : Any = decoder_ffn_dim
_lowercase : Tuple = decoder_layers
_lowercase : Tuple = decoder_attention_heads
_lowercase : Optional[int] = dropout
_lowercase : List[str] = attention_dropout
_lowercase : Union[str, Any] = activation_dropout
_lowercase : List[str] = activation_function
_lowercase : Optional[Any] = init_std
_lowercase : Union[str, Any] = init_xavier_std
_lowercase : Any = encoder_layerdrop
_lowercase : Dict = decoder_layerdrop
_lowercase : List[str] = encoder_layers
_lowercase : Optional[Any] = auxiliary_loss
_lowercase : Tuple = position_embedding_type
_lowercase : Union[str, Any] = backbone
_lowercase : Optional[int] = use_pretrained_backbone
_lowercase : str = dilation
# Hungarian matcher
_lowercase : Any = class_cost
_lowercase : Optional[Any] = bbox_cost
_lowercase : Tuple = giou_cost
# Loss coefficients
_lowercase : str = mask_loss_coefficient
_lowercase : Optional[int] = dice_loss_coefficient
_lowercase : str = cls_loss_coefficient
_lowercase : Tuple = bbox_loss_coefficient
_lowercase : Tuple = giou_loss_coefficient
_lowercase : str = focal_alpha
super().__init__(is_encoder_decoder=_lowerCAmelCase , **_lowerCAmelCase )
@property
def __a ( self ):
return self.encoder_attention_heads
@property
def __a ( self ):
return self.d_model
def __a ( self ):
_lowercase : Any = copy.deepcopy(self.__dict__ )
if self.backbone_config is not None:
_lowercase : List[Any] = self.backbone_config.to_dict()
_lowercase : Tuple = self.__class__.model_type
return output
class lowerCAmelCase_ ( __snake_case ):
_UpperCamelCase : Dict = version.parse("1.11" )
@property
def __a ( self ):
return OrderedDict(
[
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
('pixel_mask', {0: 'batch'}),
] )
@property
def __a ( self ):
return 1E-5
@property
def __a ( self ):
return 1_2
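# A small usage sketch via the public transformers class this file defines
# (assuming transformers is installed): `attribute_map` aliases hidden_size
# to d_model, so both names read the same value.
from transformers import ConditionalDetrConfig

_cfg = ConditionalDetrConfig()
assert _cfg.hidden_size == _cfg.d_model == 256
assert _cfg.num_attention_heads == _cfg.encoder_attention_heads == 8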
| 66
|
from __future__ import annotations
import math
def lowercase ( a , a ):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ :List[Any] = u
for i in range(1 , a ):
SCREAMING_SNAKE_CASE_ :Union[str, Any] = temp * (u - i)
return temp
def lowercase ( ):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ :int = int(input("enter the numbers of values: " ) )
SCREAMING_SNAKE_CASE_ :list[list[float]] = []
for _ in range(a ):
y.append([] )
for i in range(a ):
for j in range(a ):
y[i].append(a )
SCREAMING_SNAKE_CASE_ :Any = 0
print("enter the values of parameters in a list: " )
SCREAMING_SNAKE_CASE_ :Dict = list(map(a , input().split() ) )
print("enter the values of corresponding parameters: " )
for i in range(a ):
SCREAMING_SNAKE_CASE_ :List[Any] = float(input() )
SCREAMING_SNAKE_CASE_ :Optional[Any] = int(input("enter the value to interpolate: " ) )
SCREAMING_SNAKE_CASE_ :str = (value - x[0]) / (x[1] - x[0])
# for calculating forward difference table
for i in range(1 , a ):
for j in range(n - i ):
SCREAMING_SNAKE_CASE_ :List[str] = y[j + 1][i - 1] - y[j][i - 1]
SCREAMING_SNAKE_CASE_ :Tuple = y[0][0]
for i in range(1 , a ):
summ += (ucal(a , a ) * y[0][i]) / math.factorial(a )
print(F"the value at {value} is {summ}" )
if __name__ == "__main__":
main()
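# A self-contained worked example of the forward-difference scheme the
# interactive script above implements; the helper name is ours, not the
# script's. With y = x**2 sampled at x = 0..3 the forward differences are
# [1, 3, 5], [2, 2], [0], and evaluating at x = 1.5 (u = 1.5) gives 2.25.
def newton_forward(x: list[float], y_values: list[float], value: float) -> float:
    n = len(x)
    table = [list(y_values)]  # row k holds the k-th forward differences
    for k in range(1, n):
        prev = table[-1]
        table.append([prev[j + 1] - prev[j] for j in range(n - k)])
    u = (value - x[0]) / (x[1] - x[0])
    total, u_term = table[0][0], 1.0
    for k in range(1, n):
        u_term *= (u - (k - 1)) / k  # accumulates u(u-1)...(u-k+1) / k!
        total += u_term * table[k][0]
    return total


assert newton_forward([0, 1, 2, 3], [0, 1, 4, 9], 1.5) == 2.25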
| 631
| 0
|
'''simple docstring'''
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ConditionalDetrImageProcessor
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
"""simple docstring"""
def __init__( self , _snake_case , _snake_case=7 , _snake_case=3 , _snake_case=30 , _snake_case=4_00 , _snake_case=True , _snake_case=None , _snake_case=True , _snake_case=[0.5, 0.5, 0.5] , _snake_case=[0.5, 0.5, 0.5] , _snake_case=True , _snake_case=1 / 2_55 , _snake_case=True , ):
"""simple docstring"""
__lowerCamelCase = size if size is not None else {'''shortest_edge''': 18, '''longest_edge''': 13_33}
__lowerCamelCase = parent
__lowerCamelCase = batch_size
__lowerCamelCase = num_channels
__lowerCamelCase = min_resolution
__lowerCamelCase = max_resolution
__lowerCamelCase = do_resize
__lowerCamelCase = size
__lowerCamelCase = do_normalize
__lowerCamelCase = image_mean
__lowerCamelCase = image_std
__lowerCamelCase = do_rescale
__lowerCamelCase = rescale_factor
__lowerCamelCase = do_pad
def _lowerCamelCase ( self ):
"""simple docstring"""
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
def _lowerCamelCase ( self , _snake_case , _snake_case=False ):
"""simple docstring"""
if not batched:
__lowerCamelCase = image_inputs[0]
if isinstance(_snake_case , Image.Image ):
__lowerCamelCase , __lowerCamelCase = image.size
else:
__lowerCamelCase , __lowerCamelCase = image.shape[1], image.shape[2]
if w < h:
__lowerCamelCase = int(self.size['''shortest_edge'''] * h / w )
__lowerCamelCase = self.size['''shortest_edge''']
elif w > h:
__lowerCamelCase = self.size['''shortest_edge''']
__lowerCamelCase = int(self.size['''shortest_edge'''] * w / h )
else:
__lowerCamelCase = self.size['''shortest_edge''']
__lowerCamelCase = self.size['''shortest_edge''']
else:
__lowerCamelCase = []
for image in image_inputs:
__lowerCamelCase , __lowerCamelCase = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
__lowerCamelCase = max(_snake_case , key=lambda _snake_case : item[0] )[0]
__lowerCamelCase = max(_snake_case , key=lambda _snake_case : item[1] )[1]
return expected_height, expected_width
@require_torch
@require_vision
class _SCREAMING_SNAKE_CASE ( UpperCamelCase , unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = ConditionalDetrImageProcessor if is_vision_available() else None
def _lowerCamelCase ( self ):
"""simple docstring"""
__lowerCamelCase = ConditionalDetrImageProcessingTester(self )
@property
def _lowerCamelCase ( self ):
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def _lowerCamelCase ( self ):
"""simple docstring"""
__lowerCamelCase = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_snake_case , '''image_mean''' ) )
self.assertTrue(hasattr(_snake_case , '''image_std''' ) )
self.assertTrue(hasattr(_snake_case , '''do_normalize''' ) )
self.assertTrue(hasattr(_snake_case , '''do_resize''' ) )
self.assertTrue(hasattr(_snake_case , '''size''' ) )
def _lowerCamelCase ( self ):
"""simple docstring"""
__lowerCamelCase = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'''shortest_edge''': 18, '''longest_edge''': 13_33} )
self.assertEqual(image_processor.do_pad , _snake_case )
__lowerCamelCase = self.image_processing_class.from_dict(
self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=_snake_case )
self.assertEqual(image_processor.size , {'''shortest_edge''': 42, '''longest_edge''': 84} )
self.assertEqual(image_processor.do_pad , _snake_case )
def _lowerCamelCase ( self ):
"""simple docstring"""
pass
def _lowerCamelCase ( self ):
"""simple docstring"""
__lowerCamelCase = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__lowerCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=_snake_case )
for image in image_inputs:
self.assertIsInstance(_snake_case , Image.Image )
# Test not batched input
__lowerCamelCase = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
__lowerCamelCase , __lowerCamelCase = self.image_processor_tester.get_expected_values(_snake_case )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
__lowerCamelCase , __lowerCamelCase = self.image_processor_tester.get_expected_values(_snake_case , batched=_snake_case )
__lowerCamelCase = image_processing(_snake_case , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def _lowerCamelCase ( self ):
"""simple docstring"""
__lowerCamelCase = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
__lowerCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=_snake_case , numpify=_snake_case )
for image in image_inputs:
self.assertIsInstance(_snake_case , np.ndarray )
# Test not batched input
__lowerCamelCase = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
__lowerCamelCase , __lowerCamelCase = self.image_processor_tester.get_expected_values(_snake_case )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
__lowerCamelCase = image_processing(_snake_case , return_tensors='''pt''' ).pixel_values
__lowerCamelCase , __lowerCamelCase = self.image_processor_tester.get_expected_values(_snake_case , batched=_snake_case )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def _lowerCamelCase ( self ):
"""simple docstring"""
__lowerCamelCase = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
__lowerCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=_snake_case , torchify=_snake_case )
for image in image_inputs:
self.assertIsInstance(_snake_case , torch.Tensor )
# Test not batched input
__lowerCamelCase = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
__lowerCamelCase , __lowerCamelCase = self.image_processor_tester.get_expected_values(_snake_case )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
__lowerCamelCase = image_processing(_snake_case , return_tensors='''pt''' ).pixel_values
__lowerCamelCase , __lowerCamelCase = self.image_processor_tester.get_expected_values(_snake_case , batched=_snake_case )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
@slow
def _lowerCamelCase ( self ):
"""simple docstring"""
__lowerCamelCase = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
with open('''./tests/fixtures/tests_samples/COCO/coco_annotations.txt''' , '''r''' ) as f:
__lowerCamelCase = json.loads(f.read() )
__lowerCamelCase = {'''image_id''': 3_97_69, '''annotations''': target}
# encode them
__lowerCamelCase = ConditionalDetrImageProcessor.from_pretrained('''microsoft/conditional-detr-resnet-50''' )
__lowerCamelCase = image_processing(images=_snake_case , annotations=_snake_case , return_tensors='''pt''' )
# verify pixel values
__lowerCamelCase = torch.Size([1, 3, 8_00, 10_66] )
self.assertEqual(encoding['''pixel_values'''].shape , _snake_case )
__lowerCamelCase = torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1] )
self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] , _snake_case , atol=1E-4 ) )
# verify area
__lowerCamelCase = torch.tensor([58_87.96_00, 1_12_50.20_61, 48_93_53.84_38, 83_71_22.75_00, 14_79_67.51_56, 16_57_32.34_38] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] , _snake_case ) )
# verify boxes
__lowerCamelCase = torch.Size([6, 4] )
self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape , _snake_case )
__lowerCamelCase = torch.tensor([0.5_5_0_3, 0.2_7_6_5, 0.0_6_0_4, 0.2_2_1_5] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] , _snake_case , atol=1E-3 ) )
# verify image_id
__lowerCamelCase = torch.tensor([3_97_69] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] , _snake_case ) )
# verify is_crowd
__lowerCamelCase = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] , _snake_case ) )
# verify class_labels
__lowerCamelCase = torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] , _snake_case ) )
# verify orig_size
__lowerCamelCase = torch.tensor([4_80, 6_40] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] , _snake_case ) )
# verify size
__lowerCamelCase = torch.tensor([8_00, 10_66] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] , _snake_case ) )
@slow
def _lowerCamelCase ( self ):
"""simple docstring"""
__lowerCamelCase = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
with open('''./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt''' , '''r''' ) as f:
__lowerCamelCase = json.loads(f.read() )
__lowerCamelCase = {'''file_name''': '''000000039769.png''', '''image_id''': 3_97_69, '''segments_info''': target}
__lowerCamelCase = pathlib.Path('''./tests/fixtures/tests_samples/COCO/coco_panoptic''' )
# encode them
__lowerCamelCase = ConditionalDetrImageProcessor(format='''coco_panoptic''' )
__lowerCamelCase = image_processing(images=_snake_case , annotations=_snake_case , masks_path=_snake_case , return_tensors='''pt''' )
# verify pixel values
__lowerCamelCase = torch.Size([1, 3, 8_00, 10_66] )
self.assertEqual(encoding['''pixel_values'''].shape , _snake_case )
__lowerCamelCase = torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1] )
self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] , _snake_case , atol=1E-4 ) )
# verify area
__lowerCamelCase = torch.tensor([14_79_79.68_75, 16_55_27.04_69, 48_46_38.59_38, 1_12_92.93_75, 58_79.65_62, 76_34.11_47] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] , _snake_case ) )
# verify boxes
__lowerCamelCase = torch.Size([6, 4] )
self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape , _snake_case )
__lowerCamelCase = torch.tensor([0.2_6_2_5, 0.5_4_3_7, 0.4_6_8_8, 0.8_6_2_5] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] , _snake_case , atol=1E-3 ) )
# verify image_id
__lowerCamelCase = torch.tensor([3_97_69] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] , _snake_case ) )
# verify is_crowd
__lowerCamelCase = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] , _snake_case ) )
# verify class_labels
__lowerCamelCase = torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] , _snake_case ) )
# verify masks
__lowerCamelCase = 82_28_73
self.assertEqual(encoding['''labels'''][0]['''masks'''].sum().item() , _snake_case )
# verify orig_size
__lowerCamelCase = torch.tensor([4_80, 6_40] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] , _snake_case ) )
# verify size
__lowerCamelCase = torch.tensor([8_00, 10_66] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] , _snake_case ) )
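# The resize rule exercised by get_expected_values above, in one line: the
# shorter image side is scaled to size["shortest_edge"] while the other side
# keeps the aspect ratio, so a 400 x 300 input with shortest_edge=18 becomes
# 24 x 18 (width x height).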
| 710
|
'''simple docstring'''
import os
import unittest
from transformers.models.transfo_xl.tokenization_transfo_xl import VOCAB_FILES_NAMES, TransfoXLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class _SCREAMING_SNAKE_CASE ( UpperCamelCase , unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = TransfoXLTokenizer
SCREAMING_SNAKE_CASE_ = False
SCREAMING_SNAKE_CASE_ = False
def _lowerCamelCase ( self ):
"""simple docstring"""
super().setUp()
__lowerCamelCase = [
'''<unk>''',
'''[CLS]''',
'''[SEP]''',
'''want''',
'''unwanted''',
'''wa''',
'''un''',
'''running''',
''',''',
'''low''',
'''l''',
]
__lowerCamelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
def _lowerCamelCase ( self , **_snake_case ):
"""simple docstring"""
__lowerCamelCase = True
return TransfoXLTokenizer.from_pretrained(self.tmpdirname , **_snake_case )
def _lowerCamelCase ( self , _snake_case ):
"""simple docstring"""
__lowerCamelCase = '''<unk> UNwanted , running'''
__lowerCamelCase = '''<unk> unwanted, running'''
return input_text, output_text
def _lowerCamelCase ( self ):
"""simple docstring"""
__lowerCamelCase = TransfoXLTokenizer(vocab_file=self.vocab_file , lower_case=_snake_case )
__lowerCamelCase = tokenizer.tokenize('''<unk> UNwanted , running''' )
self.assertListEqual(_snake_case , ['''<unk>''', '''unwanted''', ''',''', '''running'''] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(_snake_case ) , [0, 4, 8, 7] )
def _lowerCamelCase ( self ):
"""simple docstring"""
__lowerCamelCase = TransfoXLTokenizer(lower_case=_snake_case )
self.assertListEqual(
tokenizer.tokenize(''' \tHeLLo ! how \n Are yoU ? ''' ) , ['''hello''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
def _lowerCamelCase ( self ):
"""simple docstring"""
__lowerCamelCase = TransfoXLTokenizer(lower_case=_snake_case )
self.assertListEqual(
tokenizer.tokenize(''' \tHeLLo ! how \n Are yoU ? ''' ) , ['''HeLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] )
def _lowerCamelCase ( self ):
"""simple docstring"""
__lowerCamelCase = TransfoXLTokenizer(lower_case=_snake_case )
__lowerCamelCase = '''Hello (bracket) and side-scrolled [and] Henry\'s $5,000 with 3.34 m. What\'s up!?'''
__lowerCamelCase = [
'''Hello''',
'''(''',
'''bracket''',
''')''',
'''and''',
'''side''',
'''@-@''',
'''scrolled''',
'''[''',
'''and''',
''']''',
'''Henry''',
'''\'s''',
'''$''',
'''5''',
'''@,@''',
'''000''',
'''with''',
'''3''',
'''@.@''',
'''34''',
'''m''',
'''.''',
'''What''',
'''\'s''',
'''up''',
'''!''',
'''?''',
]
self.assertListEqual(tokenizer.tokenize(_snake_case ) , _snake_case )
self.assertEqual(tokenizer.convert_tokens_to_string(_snake_case ) , _snake_case )
def _lowerCamelCase ( self ):
"""simple docstring"""
__lowerCamelCase = self.get_tokenizer()
__lowerCamelCase = len(_snake_case )
tokenizer.add_tokens(['''new1''', '''new2'''] )
tokenizer.move_added_token('''new1''' , 1 )
# Check that moved token is not copied (duplicate)
self.assertEqual(len(_snake_case ) , original_len + 2 )
# Check that token is moved to specified id
self.assertEqual(tokenizer.encode('''new1''' ) , [1] )
self.assertEqual(tokenizer.decode([1] ) , '''new1''' )
| 575
| 0
|
'''simple docstring'''
import logging
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.bert.modeling_bert import (
BERT_INPUTS_DOCSTRING,
BERT_START_DOCSTRING,
BertEncoder,
BertModel,
BertPreTrainedModel,
)
__UpperCAmelCase =logging.getLogger(__name__)
class a__ ( a_ ):
def SCREAMING_SNAKE_CASE__ ( self : str , a : int , a : List[Any] , a : List[str]=None , a : str=None ):
"""simple docstring"""
__lowerCamelCase = self.layer[current_layer](_a , _a , head_mask[current_layer] )
__lowerCamelCase = layer_outputs[0]
return hidden_states
@add_start_docstrings(
"The bare Bert Model transformer with PABEE outputting raw hidden-states without any specific head on top." , a_ , )
class a__ ( a_ ):
def __init__( self : Dict , a : Optional[int] ):
"""simple docstring"""
super().__init__(_a )
__lowerCamelCase = BertEncoderWithPabee(_a )
self.init_weights()
__lowerCamelCase = 0
__lowerCamelCase = 0
__lowerCamelCase = 0
__lowerCamelCase = 0
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] , a : Optional[Any] ):
"""simple docstring"""
__lowerCamelCase = threshold
def SCREAMING_SNAKE_CASE__ ( self : Tuple , a : int ):
"""simple docstring"""
__lowerCamelCase = patience
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ):
"""simple docstring"""
__lowerCamelCase = 0
__lowerCamelCase = 0
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ):
"""simple docstring"""
__lowerCamelCase = self.inference_layers_num / self.inference_instances_num
__lowerCamelCase = (
f"""*** Patience = {self.patience} Avg. Inference Layers = {avg_inf_layers:.2f} Speed Up ="""
f""" {1 - avg_inf_layers / self.config.num_hidden_layers:.2f} ***"""
)
print(_a )
@add_start_docstrings_to_model_forward(_a )
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] , a : Union[str, Any]=None , a : Any=None , a : Optional[int]=None , a : List[str]=None , a : Dict=None , a : Union[str, Any]=None , a : Dict=None , a : Union[str, Any]=None , a : Optional[Any]=None , a : Union[str, Any]=None , a : Dict=False , ):
"""simple docstring"""
if input_ids is not None and inputs_embeds is not None:
raise ValueError('''You cannot specify both input_ids and inputs_embeds at the same time''' )
elif input_ids is not None:
__lowerCamelCase = input_ids.size()
elif inputs_embeds is not None:
__lowerCamelCase = inputs_embeds.size()[:-1]
else:
raise ValueError('''You have to specify either input_ids or inputs_embeds''' )
__lowerCamelCase = input_ids.device if input_ids is not None else inputs_embeds.device
if attention_mask is None:
__lowerCamelCase = torch.ones(_a , device=_a )
if token_type_ids is None:
__lowerCamelCase = torch.zeros(_a , dtype=torch.long , device=_a )
# We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
# ourselves in which case we just need to make it broadcastable to all heads.
__lowerCamelCase = self.get_extended_attention_mask(_a , _a , _a )
# If a 2D ou 3D attention mask is provided for the cross-attention
# we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
if self.config.is_decoder and encoder_hidden_states is not None:
__lowerCamelCase = encoder_hidden_states.size()
__lowerCamelCase = (encoder_batch_size, encoder_sequence_length)
if encoder_attention_mask is None:
__lowerCamelCase = torch.ones(_a , device=_a )
__lowerCamelCase = self.invert_attention_mask(_a )
else:
__lowerCamelCase = None
# Prepare head mask if needed
# 1.0 in head_mask indicate we keep the head
# attention_probs has shape bsz x n_heads x N x N
# input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
# and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
__lowerCamelCase = self.get_head_mask(_a , self.config.num_hidden_layers )
__lowerCamelCase = self.embeddings(
input_ids=_a , position_ids=_a , token_type_ids=_a , inputs_embeds=_a )
__lowerCamelCase = embedding_output
if self.training:
__lowerCamelCase = []
for i in range(self.config.num_hidden_layers ):
__lowerCamelCase = self.encoder.adaptive_forward(
_a , current_layer=_a , attention_mask=_a , head_mask=_a )
__lowerCamelCase = self.pooler(_a )
__lowerCamelCase = output_layers[i](output_dropout(_a ) )
res.append(_a )
elif self.patience == 0: # Use all layers for inference
__lowerCamelCase = self.encoder(
_a , attention_mask=_a , head_mask=_a , encoder_hidden_states=_a , encoder_attention_mask=_a , )
__lowerCamelCase = self.pooler(encoder_outputs[0] )
__lowerCamelCase = [output_layers[self.config.num_hidden_layers - 1](_a )]
else:
__lowerCamelCase = 0
__lowerCamelCase = None
__lowerCamelCase = 0
for i in range(self.config.num_hidden_layers ):
calculated_layer_num += 1
__lowerCamelCase = self.encoder.adaptive_forward(
_a , current_layer=_a , attention_mask=_a , head_mask=_a )
__lowerCamelCase = self.pooler(_a )
__lowerCamelCase = output_layers[i](_a )
if regression:
__lowerCamelCase = logits.detach()
if patient_result is not None:
__lowerCamelCase = patient_result.detach()
if (patient_result is not None) and torch.abs(patient_result - labels ) < self.regression_threshold:
patient_counter += 1
else:
__lowerCamelCase = 0
else:
__lowerCamelCase = logits.detach().argmax(dim=1 )
if patient_result is not None:
__lowerCamelCase = patient_result.detach().argmax(dim=1 )
if (patient_result is not None) and torch.all(labels.eq(_a ) ):
patient_counter += 1
else:
__lowerCamelCase = 0
__lowerCamelCase = logits
if patient_counter == self.patience:
break
__lowerCamelCase = [patient_result]
self.inference_layers_num += calculated_layer_num
self.inference_instances_num += 1
return res
@add_start_docstrings(
"Bert Model transformer with PABEE and a sequence classification/regression head on top (a linear layer on top of\n the pooled output) e.g. for GLUE tasks. " , a_ , )
class a__ ( a_ ):
def __init__( self : List[str] , a : Optional[Any] ):
"""simple docstring"""
super().__init__(_a )
__lowerCamelCase = config.num_labels
__lowerCamelCase = BertModelWithPabee(_a )
__lowerCamelCase = nn.Dropout(config.hidden_dropout_prob )
__lowerCamelCase = nn.ModuleList(
[nn.Linear(config.hidden_size , self.config.num_labels ) for _ in range(config.num_hidden_layers )] )
self.init_weights()
@add_start_docstrings_to_model_forward(_a )
def SCREAMING_SNAKE_CASE__ ( self : Dict , a : Tuple=None , a : List[str]=None , a : List[str]=None , a : Optional[Any]=None , a : Optional[Any]=None , a : int=None , a : Optional[Any]=None , ):
"""simple docstring"""
__lowerCamelCase = self.bert(
input_ids=_a , attention_mask=_a , token_type_ids=_a , position_ids=_a , head_mask=_a , inputs_embeds=_a , output_dropout=self.dropout , output_layers=self.classifiers , regression=self.num_labels == 1 , )
__lowerCamelCase = (logits[-1],)
if labels is not None:
__lowerCamelCase = None
__lowerCamelCase = 0
for ix, logits_item in enumerate(_a ):
if self.num_labels == 1:
# We are doing regression
__lowerCamelCase = MSELoss()
__lowerCamelCase = loss_fct(logits_item.view(-1 ) , labels.view(-1 ) )
else:
__lowerCamelCase = CrossEntropyLoss()
__lowerCamelCase = loss_fct(logits_item.view(-1 , self.num_labels ) , labels.view(-1 ) )
if total_loss is None:
__lowerCamelCase = loss
else:
total_loss += loss * (ix + 1)
total_weights += ix + 1
__lowerCamelCase = (total_loss / total_weights,) + outputs
return outputs
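# The PABEE early-exit rule above, reduced to a minimal standalone sketch
# (dummy per-layer predictions, not the module's real interface): inference
# stops once `patience` consecutive layers agree with the previous prediction.
def _early_exit(per_layer_predictions, patience):
    patient_counter, previous, layer_num = 0, None, 0
    for layer_num, prediction in enumerate(per_layer_predictions, start=1):
        if previous is not None and prediction == previous:
            patient_counter += 1
        else:
            patient_counter = 0
        previous = prediction
        if patient_counter == patience:
            break
    return previous, layer_num


# Layers 3-5 predict the same class, so with patience=2 we exit at layer 5.
assert _early_exit([0, 1, 2, 2, 2, 2, 2, 2], patience=2) == (2, 5)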
| 546
|
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_A : str = logging.get_logger(__name__)
_A : Tuple = {
"""google/bigbird-roberta-base""": """https://huggingface.co/google/bigbird-roberta-base/resolve/main/config.json""",
"""google/bigbird-roberta-large""": """https://huggingface.co/google/bigbird-roberta-large/resolve/main/config.json""",
"""google/bigbird-base-trivia-itc""": """https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/config.json""",
# See all BigBird models at https://huggingface.co/models?filter=big_bird
}
class a__ ( a_ ):
__lowerCAmelCase = """big_bird"""
def __init__( self , _a=50_358 , _a=768 , _a=12 , _a=12 , _a=3_072 , _a="gelu_new" , _a=0.1 , _a=0.1 , _a=4_096 , _a=2 , _a=0.0_2 , _a=1E-12 , _a=True , _a=0 , _a=1 , _a=2 , _a=66 , _a="block_sparse" , _a=True , _a=False , _a=64 , _a=3 , _a=None , **_a , ):
super().__init__(
pad_token_id=_a , bos_token_id=_a , eos_token_id=_a , sep_token_id=_a , **_a , )
lowercase : List[Any] = vocab_size
lowercase : Optional[Any] = max_position_embeddings
lowercase : str = hidden_size
lowercase : int = num_hidden_layers
lowercase : Optional[Any] = num_attention_heads
lowercase : List[Any] = intermediate_size
lowercase : int = hidden_act
lowercase : str = hidden_dropout_prob
lowercase : List[Any] = attention_probs_dropout_prob
lowercase : Tuple = initializer_range
lowercase : Optional[int] = type_vocab_size
lowercase : str = layer_norm_eps
lowercase : Tuple = use_cache
lowercase : Any = rescale_embeddings
lowercase : List[str] = attention_type
lowercase : int = use_bias
lowercase : Dict = block_size
lowercase : List[str] = num_random_blocks
lowercase : int = classifier_dropout
class a__ ( a_ ):
@property
def __magic_name__ ( self ):
if self.task == "multiple-choice":
lowercase : Union[str, Any] = {0: "batch", 1: "choice", 2: "sequence"}
else:
lowercase : Dict = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("input_ids", dynamic_axis),
("attention_mask", dynamic_axis),
] )
| 361
| 0
|
from manim import *
class Stage1(Scene):
    def construct(self):
        mem = Rectangle(height=0.5, width=0.5)
        fill = Rectangle(height=0.46, width=0.46).set_stroke(width=0)

        cpu_left_col_base = [mem.copy() for i in range(6)]
        cpu_right_col_base = [mem.copy() for i in range(6)]
        cpu_left_col = VGroup(*cpu_left_col_base).arrange(UP, buff=0)
        cpu_right_col = VGroup(*cpu_right_col_base).arrange(UP, buff=0)
        cpu_rects = VGroup(cpu_left_col, cpu_right_col).arrange(RIGHT, buff=0)
        cpu_text = Text("CPU", font_size=24)
        cpu = Group(cpu_rects, cpu_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        cpu.move_to([-2.5, -0.5, 0])
        self.add(cpu)

        gpu_base = [mem.copy() for i in range(1)]
        gpu_rect = VGroup(*gpu_base).arrange(UP, buff=0)
        gpu_text = Text("GPU", font_size=24)
        gpu = Group(gpu_rect, gpu_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        gpu.align_to(cpu, DOWN)
        gpu.set_x(gpu.get_x() - 1)
        self.add(gpu)

        model_base = [mem.copy() for i in range(6)]
        model_rect = VGroup(*model_base).arrange(RIGHT, buff=0)
        model_text = Text("Model", font_size=24)
        model = Group(model_rect, model_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        model.move_to([3, -1.0, 0])

        self.play(
            Create(cpu_left_col, run_time=1),
            Create(cpu_right_col, run_time=1),
            Create(gpu_rect, run_time=1),
        )

        step_1 = MarkupText(
            f"First, an empty model skeleton is loaded\ninto <span fgcolor='{YELLOW}'>memory</span> without using much RAM.",
            font_size=24,
        )

        key = Square(side_length=2.2)
        key.move_to([-5, 2, 0])

        key_text = MarkupText(
            f"<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model",
            font_size=18,
        )
        key_text.move_to([-5, 2.4, 0])

        step_1.move_to([2, 2, 0])
        self.play(Write(step_1, run_time=2.5), Write(key_text), Write(key))

        self.add(model)

        first_animations = []
        second_animations = []
        cpu_targs = []
        for i, rect in enumerate(model_base):
            cpu_target = Rectangle(height=0.46, width=0.46).set_stroke(width=0.0).set_fill(YELLOW, opacity=0.7)
            cpu_target.move_to(rect)
            cpu_target.generate_target()
            cpu_target.target.height = 0.46 / 4
            cpu_target.target.width = 0.46 / 3
            if i == 0:
                cpu_target.target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT), buff=0.02, direction=UP)
                cpu_target.target.set_x(cpu_target.target.get_x() + 0.1)
            elif i == 3:
                cpu_target.target.next_to(cpu_targs[0].target, direction=UP, buff=0.0)
            else:
                cpu_target.target.next_to(cpu_targs[i - 1].target, direction=UP, buff=0.0)
            cpu_targs.append(cpu_target)

            first_animations.append(rect.animate(run_time=0.5).set_stroke(YELLOW))
            second_animations.append(MoveToTarget(cpu_target, run_time=1.5))

        self.play(*first_animations)
        self.play(*second_animations)
        self.wait()
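# Rendering sketch (assumes the manim community-edition CLI is installed; the
# file name is illustrative):
#
#     manim -pql big_model_inference.py Stage1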
| 704
|
"""simple docstring"""
from __future__ import annotations
def median_of_two_arrays(nums1: list[float], nums2: list[float]) -> float:
    all_numbers = sorted(nums1 + nums2)
    div, mod = divmod(len(all_numbers), 2)
    if mod == 1:
        return all_numbers[div]
    else:
        return (all_numbers[div] + all_numbers[div - 1]) / 2


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    array_1 = [float(x) for x in input("Enter the elements of first array: ").split()]
    array_2 = [float(x) for x in input("Enter the elements of second array: ").split()]
    print(f"The median of two arrays is: {median_of_two_arrays(array_1, array_2)}")
| 36
| 0
|
import os
import tempfile
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from torch import nn
from transformers import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_inverse_sqrt_schedule,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
def unwrap_schedule(scheduler, num_steps=10):
    lrs = []
    for _ in range(num_steps):
        lrs.append(scheduler.get_lr()[0])
        scheduler.step()
    return lrs
def unwrap_and_save_reload_schedule(scheduler, num_steps=10):
    lrs = []
    for step in range(num_steps):
        lrs.append(scheduler.get_lr()[0])
        scheduler.step()
        if step == num_steps // 2:
            with tempfile.TemporaryDirectory() as tmpdirname:
                file_name = os.path.join(tmpdirname, "schedule.bin")
                torch.save(scheduler.state_dict(), file_name)

                state_dict = torch.load(file_name)
                scheduler.load_state_dict(state_dict)
    return lrs
@require_torch
class OptimizationTest(unittest.TestCase):
    def assertListAlmostEqual(self, list1, list2, tol):
        self.assertEqual(len(list1), len(list2))
        for a, b in zip(list1, list2):
            self.assertAlmostEqual(a, b, delta=tol)
    def test_adam_w(self):
        w = torch.tensor([0.1, -0.2, -0.1], requires_grad=True)
        target = torch.tensor([0.4, 0.2, -0.5])
        criterion = nn.MSELoss()
        # No warmup, constant schedule, no gradient clipping
        optimizer = AdamW(params=[w], lr=2e-1, weight_decay=0.0)
        for _ in range(100):
            loss = criterion(w, target)
            loss.backward()
            optimizer.step()
            w.grad.detach_()  # No zero_grad() function on simple tensors. we do it ourselves.
            w.grad.zero_()
        self.assertListAlmostEqual(w.tolist(), [0.4, 0.2, -0.5], tol=1e-2)
    def test_adafactor(self):
        w = torch.tensor([0.1, -0.2, -0.1], requires_grad=True)
        target = torch.tensor([0.4, 0.2, -0.5])
        criterion = nn.MSELoss()
        # No warmup, constant schedule, no gradient clipping
        optimizer = Adafactor(
            params=[w],
            lr=1e-2,
            eps=(1e-30, 1e-3),
            clip_threshold=1.0,
            decay_rate=-0.8,
            beta1=None,
            weight_decay=0.0,
            relative_step=False,
            scale_parameter=False,
            warmup_init=False,
        )
        for _ in range(1000):
            loss = criterion(w, target)
            loss.backward()
            optimizer.step()
            w.grad.detach_()  # No zero_grad() function on simple tensors. we do it ourselves.
            w.grad.zero_()
        self.assertListAlmostEqual(w.tolist(), [0.4, 0.2, -0.5], tol=1e-2)
@require_torch
class ScheduleInitTest(unittest.TestCase):
    m = nn.Linear(50, 50) if is_torch_available() else None
    optimizer = AdamW(m.parameters(), lr=10.0) if is_torch_available() else None
    num_steps = 10

    def assertListAlmostEqual(self, list1, list2, tol, msg=None):
        self.assertEqual(len(list1), len(list2))
        for a, b in zip(list1, list2):
            self.assertAlmostEqual(a, b, delta=tol, msg=msg)
    def test_schedulers(self):
        common_kwargs = {"num_warmup_steps": 2, "num_training_steps": 10}
        # schedulers dict format
        # function: (sched_args_dict, expected_learning_rates)
        scheds = {
            get_constant_schedule: ({}, [10.0] * self.num_steps),
            get_constant_schedule_with_warmup: (
                {"num_warmup_steps": 4},
                [0.0, 2.5, 5.0, 7.5, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0],
            ),
            get_linear_schedule_with_warmup: (
                {**common_kwargs},
                [0.0, 5.0, 10.0, 8.75, 7.5, 6.25, 5.0, 3.75, 2.5, 1.25],
            ),
            get_cosine_schedule_with_warmup: (
                {**common_kwargs},
                [0.0, 5.0, 10.0, 9.61, 8.53, 6.91, 5.0, 3.08, 1.46, 0.38],
            ),
            get_cosine_with_hard_restarts_schedule_with_warmup: (
                {**common_kwargs, "num_cycles": 2},
                [0.0, 5.0, 10.0, 8.53, 5.0, 1.46, 10.0, 8.53, 5.0, 1.46],
            ),
            get_polynomial_decay_schedule_with_warmup: (
                {**common_kwargs, "power": 2.0, "lr_end": 1e-7},
                [0.0, 5.0, 10.0, 7.656, 5.625, 3.906, 2.5, 1.406, 0.625, 0.156],
            ),
            get_inverse_sqrt_schedule: (
                {"num_warmup_steps": 2},
                [0.0, 5.0, 10.0, 8.165, 7.071, 6.325, 5.774, 5.345, 5.0, 4.714],
            ),
        }

        for scheduler_func, data in scheds.items():
            kwargs, expected_learning_rates = data

            scheduler = scheduler_func(self.optimizer, **kwargs)
            self.assertEqual(len([scheduler.get_lr()[0]]), 1)
            lrs_1 = unwrap_schedule(scheduler, self.num_steps)
            self.assertListAlmostEqual(
                lrs_1,
                expected_learning_rates,
                tol=1e-2,
                msg=f"failed for {scheduler_func} in normal scheduler",
            )

            scheduler = scheduler_func(self.optimizer, **kwargs)
            if scheduler_func.__name__ != "get_constant_schedule":
                LambdaScheduleWrapper.wrap_scheduler(scheduler)  # wrap to test picklability of the schedule
            lrs_2 = unwrap_and_save_reload_schedule(scheduler, self.num_steps)
            self.assertListEqual(lrs_1, lrs_2, msg=f"failed for {scheduler_func} in save and reload")
class LambdaScheduleWrapper:
    def __init__(self, fn):
        self.fn = fn

    def __call__(self, *args, **kwargs):
        return self.fn(*args, **kwargs)

    @classmethod
    def wrap_scheduler(self, scheduler):
        scheduler.lr_lambdas = list(map(self, scheduler.lr_lambdas))
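# Minimal sketch of the scheduler API exercised above (assumes `torch` and
# `transformers` are installed; the tiny model and step counts are illustrative):
#
#     import torch
#     from transformers import AdamW, get_linear_schedule_with_warmup
#
#     model = torch.nn.Linear(10, 2)
#     optimizer = AdamW(model.parameters(), lr=1e-3)
#     scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=2, num_training_steps=10)
#     for _ in range(10):
#         optimizer.step()
#         scheduler.step()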
| 675
|
import string
import numpy
def greatest_common_divisor(a: int, b: int) -> int:
    return b if a == 0 else greatest_common_divisor(b % a, a)
class HillCipher:
    key_string = string.ascii_uppercase + string.digits
    # This cipher takes alphanumerics into account
    # i.e. a total of 36 characters

    # take x and return x % len(key_string)
    modulus = numpy.vectorize(lambda x: x % 36)

    to_int = numpy.vectorize(lambda x: round(x))

    def __init__(self, encrypt_key) -> None:
        self.encrypt_key = self.modulus(encrypt_key)  # mod36 calc's on the encrypt key
        self.check_determinant()  # validate the determinant of the encryption key
        self.break_key = encrypt_key.shape[0]
    def replace_letters(self, letter: str) -> int:
        return self.key_string.index(letter)

    def replace_digits(self, num: int) -> str:
        return self.key_string[round(num)]
    def check_determinant(self) -> None:
        det = round(numpy.linalg.det(self.encrypt_key))

        if det < 0:
            det = det % len(self.key_string)

        req_l = len(self.key_string)
        if greatest_common_divisor(det, len(self.key_string)) != 1:
            msg = (
                f"determinant modular {req_l} of encryption key({det}) "
                f"is not co prime w.r.t {req_l}.\nTry another key."
            )
            raise ValueError(msg)
    def process_text(self, text: str) -> str:
        chars = [char for char in text.upper() if char in self.key_string]

        last = chars[-1]
        while len(chars) % self.break_key != 0:
            chars.append(last)

        return "".join(chars)
    def encrypt(self, text: str) -> str:
        text = self.process_text(text.upper())
        encrypted = ""

        for i in range(0, len(text) - self.break_key + 1, self.break_key):
            batch = text[i : i + self.break_key]
            vec = [self.replace_letters(char) for char in batch]
            batch_vec = numpy.array([vec]).T
            batch_encrypted = self.modulus(self.encrypt_key.dot(batch_vec)).T.tolist()[
                0
            ]
            encrypted_batch = "".join(
                self.replace_digits(num) for num in batch_encrypted
            )
            encrypted += encrypted_batch

        return encrypted
    def make_decrypt_key(self) -> numpy.ndarray:
        det = round(numpy.linalg.det(self.encrypt_key))

        if det < 0:
            det = det % len(self.key_string)
        det_inv = None
        for i in range(len(self.key_string)):
            if (det * i) % len(self.key_string) == 1:
                det_inv = i
                break

        inv_key = (
            det_inv
            * numpy.linalg.det(self.encrypt_key)
            * numpy.linalg.inv(self.encrypt_key)
        )

        return self.to_int(self.modulus(inv_key))
    def decrypt(self, text: str) -> str:
        decrypt_key = self.make_decrypt_key()
        text = self.process_text(text.upper())
        decrypted = ""

        for i in range(0, len(text) - self.break_key + 1, self.break_key):
            batch = text[i : i + self.break_key]
            vec = [self.replace_letters(char) for char in batch]
            batch_vec = numpy.array([vec]).T
            batch_decrypted = self.modulus(decrypt_key.dot(batch_vec)).T.tolist()[0]
            decrypted_batch = "".join(
                self.replace_digits(num) for num in batch_decrypted
            )
            decrypted += decrypted_batch

        return decrypted
def main() -> None:
    n = int(input("Enter the order of the encryption key: "))
    hill_matrix = []

    print("Enter each row of the encryption key with space separated integers")
    for _ in range(n):
        row = [int(x) for x in input().split()]
        hill_matrix.append(row)

    hc = HillCipher(numpy.array(hill_matrix))

    print("Would you like to encrypt or decrypt some text? (1 or 2)")
    option = input("\n1. Encrypt\n2. Decrypt\n")
    if option == "1":
        text_e = input("What text would you like to encrypt?: ")
        print("Your encrypted text is:")
        print(hc.encrypt(text_e))
    elif option == "2":
        text_d = input("What text would you like to decrypt?: ")
        print("Your decrypted text is:")
        print(hc.decrypt(text_d))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
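# Non-interactive usage sketch (the 2x2 key is illustrative; its determinant,
# 2 * 6 - 5 * 1 = 7, is coprime with 36, so `check_determinant` accepts it):
#
#     cipher = HillCipher(numpy.array([[2, 5], [1, 6]]))
#     secret = cipher.encrypt("testing hill cipher")
#     print(cipher.decrypt(secret))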
| 307
| 0
|
"""simple docstring"""
def merge_sort(collection):
    def merge(left, right) -> list:
        def _merge():
            while left and right:
                yield (left if left[0] <= right[0] else right).pop(0)
            yield from left
            yield from right

        return list(_merge())

    if len(collection) <= 1:
        return collection
    mid = len(collection) // 2
    return merge(merge_sort(collection[:mid]), merge_sort(collection[mid:]))
if __name__ == "__main__":
import doctest
doctest.testmod()
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(*merge_sort(unsorted), sep=",")
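# Example run (illustrative): merge_sort([5, 3, 8, 1, 2]) returns
# [1, 2, 3, 5, 8]; merge_sort([]) returns [] via the len <= 1 base case.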
| 200
|
"""simple docstring"""
import flax.linen as nn
import jax.numpy as jnp
from .attention_flax import FlaxTransformer2DModel
from .resnet_flax import FlaxDownsample2D, FlaxResnetBlock2D, FlaxUpsample2D
class FlaxCrossAttnDownBlock2D(nn.Module):
    in_channels: int
    out_channels: int
    dropout: float = 0.0
    num_layers: int = 1
    num_attention_heads: int = 1
    add_downsample: bool = True
    use_linear_projection: bool = False
    only_cross_attention: bool = False
    use_memory_efficient_attention: bool = False
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        resnets = []
        attentions = []

        for i in range(self.num_layers):
            in_channels = self.in_channels if i == 0 else self.out_channels

            res_block = FlaxResnetBlock2D(
                in_channels=in_channels, out_channels=self.out_channels, dropout_prob=self.dropout, dtype=self.dtype, )
            resnets.append(res_block)

            attn_block = FlaxTransformer2DModel(
                in_channels=self.out_channels, n_heads=self.num_attention_heads, d_head=self.out_channels // self.num_attention_heads, depth=1, use_linear_projection=self.use_linear_projection, only_cross_attention=self.only_cross_attention, use_memory_efficient_attention=self.use_memory_efficient_attention, dtype=self.dtype, )
            attentions.append(attn_block)

        self.resnets = resnets
        self.attentions = attentions

        if self.add_downsample:
            self.downsamplers_0 = FlaxDownsample2D(self.out_channels, dtype=self.dtype)

    def __call__(self, hidden_states, temb, encoder_hidden_states, deterministic=True):
        output_states = ()

        for resnet, attn in zip(self.resnets, self.attentions):
            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)
            hidden_states = attn(hidden_states, encoder_hidden_states, deterministic=deterministic)
            output_states += (hidden_states,)

        if self.add_downsample:
            hidden_states = self.downsamplers_0(hidden_states)
            output_states += (hidden_states,)

        return hidden_states, output_states
class FlaxDownBlock2D(nn.Module):
    in_channels: int
    out_channels: int
    dropout: float = 0.0
    num_layers: int = 1
    add_downsample: bool = True
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        resnets = []

        for i in range(self.num_layers):
            in_channels = self.in_channels if i == 0 else self.out_channels

            res_block = FlaxResnetBlock2D(
                in_channels=in_channels, out_channels=self.out_channels, dropout_prob=self.dropout, dtype=self.dtype, )
            resnets.append(res_block)
        self.resnets = resnets

        if self.add_downsample:
            self.downsamplers_0 = FlaxDownsample2D(self.out_channels, dtype=self.dtype)

    def __call__(self, hidden_states, temb, deterministic=True):
        output_states = ()

        for resnet in self.resnets:
            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)
            output_states += (hidden_states,)

        if self.add_downsample:
            hidden_states = self.downsamplers_0(hidden_states)
            output_states += (hidden_states,)

        return hidden_states, output_states
class FlaxCrossAttnUpBlock2D(nn.Module):
    in_channels: int
    out_channels: int
    prev_output_channel: int
    dropout: float = 0.0
    num_layers: int = 1
    num_attention_heads: int = 1
    add_upsample: bool = True
    use_linear_projection: bool = False
    only_cross_attention: bool = False
    use_memory_efficient_attention: bool = False
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        resnets = []
        attentions = []

        for i in range(self.num_layers):
            res_skip_channels = self.in_channels if (i == self.num_layers - 1) else self.out_channels
            resnet_in_channels = self.prev_output_channel if i == 0 else self.out_channels

            res_block = FlaxResnetBlock2D(
                in_channels=resnet_in_channels + res_skip_channels, out_channels=self.out_channels, dropout_prob=self.dropout, dtype=self.dtype, )
            resnets.append(res_block)

            attn_block = FlaxTransformer2DModel(
                in_channels=self.out_channels, n_heads=self.num_attention_heads, d_head=self.out_channels // self.num_attention_heads, depth=1, use_linear_projection=self.use_linear_projection, only_cross_attention=self.only_cross_attention, use_memory_efficient_attention=self.use_memory_efficient_attention, dtype=self.dtype, )
            attentions.append(attn_block)

        self.resnets = resnets
        self.attentions = attentions

        if self.add_upsample:
            self.upsamplers_0 = FlaxUpsample2D(self.out_channels, dtype=self.dtype)

    def __call__(self, hidden_states, res_hidden_states_tuple, temb, encoder_hidden_states, deterministic=True):
        for resnet, attn in zip(self.resnets, self.attentions):
            # pop res hidden states
            res_hidden_states = res_hidden_states_tuple[-1]
            res_hidden_states_tuple = res_hidden_states_tuple[:-1]
            hidden_states = jnp.concatenate((hidden_states, res_hidden_states), axis=-1)

            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)
            hidden_states = attn(hidden_states, encoder_hidden_states, deterministic=deterministic)

        if self.add_upsample:
            hidden_states = self.upsamplers_0(hidden_states)

        return hidden_states
class FlaxUpBlock2D(nn.Module):
    in_channels: int
    out_channels: int
    prev_output_channel: int
    dropout: float = 0.0
    num_layers: int = 1
    add_upsample: bool = True
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        resnets = []

        for i in range(self.num_layers):
            res_skip_channels = self.in_channels if (i == self.num_layers - 1) else self.out_channels
            resnet_in_channels = self.prev_output_channel if i == 0 else self.out_channels

            res_block = FlaxResnetBlock2D(
                in_channels=resnet_in_channels + res_skip_channels, out_channels=self.out_channels, dropout_prob=self.dropout, dtype=self.dtype, )
            resnets.append(res_block)

        self.resnets = resnets

        if self.add_upsample:
            self.upsamplers_0 = FlaxUpsample2D(self.out_channels, dtype=self.dtype)

    def __call__(self, hidden_states, res_hidden_states_tuple, temb, deterministic=True):
        for resnet in self.resnets:
            # pop res hidden states
            res_hidden_states = res_hidden_states_tuple[-1]
            res_hidden_states_tuple = res_hidden_states_tuple[:-1]
            hidden_states = jnp.concatenate((hidden_states, res_hidden_states), axis=-1)

            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)

        if self.add_upsample:
            hidden_states = self.upsamplers_0(hidden_states)

        return hidden_states
class FlaxUNetMidBlock2DCrossAttn(nn.Module):
    in_channels: int
    dropout: float = 0.0
    num_layers: int = 1
    num_attention_heads: int = 1
    use_linear_projection: bool = False
    use_memory_efficient_attention: bool = False
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        # there is always at least one resnet
        resnets = [
            FlaxResnetBlock2D(
                in_channels=self.in_channels, out_channels=self.in_channels, dropout_prob=self.dropout, dtype=self.dtype, )
        ]

        attentions = []

        for _ in range(self.num_layers):
            attn_block = FlaxTransformer2DModel(
                in_channels=self.in_channels, n_heads=self.num_attention_heads, d_head=self.in_channels // self.num_attention_heads, depth=1, use_linear_projection=self.use_linear_projection, use_memory_efficient_attention=self.use_memory_efficient_attention, dtype=self.dtype, )
            attentions.append(attn_block)

            res_block = FlaxResnetBlock2D(
                in_channels=self.in_channels, out_channels=self.in_channels, dropout_prob=self.dropout, dtype=self.dtype, )
            resnets.append(res_block)

        self.resnets = resnets
        self.attentions = attentions

    def __call__(self, hidden_states, temb, encoder_hidden_states, deterministic=True):
        hidden_states = self.resnets[0](hidden_states, temb)
        for attn, resnet in zip(self.attentions, self.resnets[1:]):
            hidden_states = attn(hidden_states, encoder_hidden_states, deterministic=deterministic)
            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)

        return hidden_states
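# Initialization sketch for one of the blocks above (shapes are illustrative;
# assumes `jax` plus the sibling `attention_flax`/`resnet_flax` modules resolve):
#
#     import jax
#
#     block = FlaxDownBlock2D(in_channels=32, out_channels=64)
#     sample = jnp.zeros((1, 8, 8, 32))   # NHWC layout, as used by the Flax UNet
#     temb = jnp.zeros((1, 128))
#     params = block.init(jax.random.PRNGKey(0), sample, temb)
#     hidden, skip_states = block.apply(params, sample, temb)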
| 200
| 1
|
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DiffusionPipeline,
EulerDiscreteScheduler,
    StableDiffusionXLImg2ImgPipeline,
    UNet2DConditionModel,
)
from diffusers.utils import floats_tensor, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class StableDiffusionXLImg2ImgPipelineFastTests(PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase):
    pipeline_class = StableDiffusionXLImg2ImgPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width"}
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), attention_head_dim=(2, 4), use_linear_projection=True, addition_embed_type="text_time", addition_time_embed_dim=8, transformer_layers_per_block=(1, 2), projection_class_embeddings_input_dim=80, cross_attention_dim=64, )
        scheduler = EulerDiscreteScheduler(
            beta_start=0.00085, beta_end=0.012, steps_offset=1, beta_schedule="scaled_linear", timestep_spacing="leading", )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, sample_size=128, )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, hidden_act="gelu", projection_dim=32, )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip", local_files_only=True)
        text_encoder_2 = CLIPTextModelWithProjection(text_encoder_config)
        tokenizer_2 = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip", local_files_only=True)

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "text_encoder_2": text_encoder_2,
            "tokenizer_2": tokenizer_2,
            # "safety_checker": None,
            # "feature_extractor": None,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image / 2 + 0.5
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 5.0,
            "output_type": "numpy",
            "strength": 0.75,
        }
        return inputs
    def test_stable_diffusion_xl_img2img_euler(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionXLImg2ImgPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)

        expected_slice = np.array([0.4656, 0.4840, 0.4439, 0.6698, 0.5574, 0.4524, 0.5799, 0.5943, 0.5165])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    def test_attention_slicing_forward_pass(self):
        super().test_attention_slicing_forward_pass(expected_max_diff=3e-3)

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)

    def test_save_load_optional_components(self):
        pass
    def test_stable_diffusion_xl_img2img_negative_prompt_embeds(self):
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionXLImg2ImgPipeline(**components)
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        # forward without prompt embeds
        inputs = self.get_dummy_inputs(torch_device)
        negative_prompt = 3 * ["this is a negative prompt"]
        inputs["negative_prompt"] = negative_prompt
        inputs["prompt"] = 3 * [inputs["prompt"]]

        output = sd_pipe(**inputs)
        image_slice_1 = output.images[0, -3:, -3:, -1]

        # forward with prompt embeds
        inputs = self.get_dummy_inputs(torch_device)
        negative_prompt = 3 * ["this is a negative prompt"]
        prompt = 3 * [inputs.pop("prompt")]

        (
            prompt_embeds,
            negative_prompt_embeds,
            pooled_prompt_embeds,
            negative_pooled_prompt_embeds,
        ) = sd_pipe.encode_prompt(prompt, negative_prompt=negative_prompt)

        output = sd_pipe(
            **inputs,
            prompt_embeds=prompt_embeds,
            negative_prompt_embeds=negative_prompt_embeds,
            pooled_prompt_embeds=pooled_prompt_embeds,
            negative_pooled_prompt_embeds=negative_pooled_prompt_embeds,
        )
        image_slice_2 = output.images[0, -3:, -3:, -1]

        # make sure that it's equal
        assert np.abs(image_slice_1.flatten() - image_slice_2.flatten()).max() < 1e-4
@slow
@require_torch_gpu
class StableDiffusionXLImg2ImgPipelineSlowTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, device, generator_device="cpu", dtype=torch.float32, seed=0):
        generator = torch.Generator(device=generator_device).manual_seed(seed)
        latents = np.random.RandomState(seed).standard_normal((1, 4, 64, 64))
        latents = torch.from_numpy(latents).to(device=device, dtype=dtype)
        inputs = {
            "prompt": "a photograph of an astronaut riding a horse",
            "latents": latents,
            "generator": generator,
            "num_inference_steps": 3,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs

    def test_stable_diffusion(self):
        pipe = DiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-base")
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(torch_device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.49493, 0.47896, 0.40798, 0.54214, 0.53212, 0.48202, 0.47656, 0.46329, 0.48506])
        assert np.abs(image_slice - expected_slice).max() < 7e-3
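# End-to-end usage sketch for the pipeline under test (checkpoint name is an
# assumption, not taken from this file; requires a CUDA device):
#
#     import torch
#     from diffusers import StableDiffusionXLImg2ImgPipeline
#     from diffusers.utils import load_image
#
#     pipe = StableDiffusionXLImg2ImgPipeline.from_pretrained(
#         "stabilityai/stable-diffusion-xl-refiner-1.0", torch_dtype=torch.float16
#     ).to("cuda")
#     init_image = load_image("https://example.com/input.png")  # placeholder URL
#     image = pipe(prompt="a photo of a cat", image=init_image, strength=0.75).images[0]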
| 356
|
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OpenAIGPTConfig,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTForSequenceClassification,
OpenAIGPTLMHeadModel,
OpenAIGPTModel,
)
class OpenAIGPTModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.pad_token_id = self.vocab_size - 1
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = OpenAIGPTConfig(
            vocab_size=self.vocab_size, n_embd=self.hidden_size, n_layer=self.num_hidden_layers, n_head=self.num_attention_heads, n_positions=self.max_position_embeddings, pad_token_id=self.pad_token_id, )
        head_mask = ids_tensor([self.num_hidden_layers, self.num_attention_heads], 2)

        return (
            config,
            input_ids,
            head_mask,
            token_type_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        )
    def create_and_check_openai_gpt_model(self, config, input_ids, head_mask, token_type_ids, *args):
        model = OpenAIGPTModel(config=config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids, token_type_ids=token_type_ids, head_mask=head_mask)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_lm_head_model(self, config, input_ids, head_mask, token_type_ids, *args):
        model = OpenAIGPTLMHeadModel(config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids, token_type_ids=token_type_ids, labels=input_ids)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_double_lm_head_model(self, config, input_ids, head_mask, token_type_ids, *args):
        model = OpenAIGPTDoubleHeadsModel(config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids, token_type_ids=token_type_ids, labels=input_ids)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_openai_gpt_for_sequence_classification(self, config, input_ids, head_mask, token_type_ids, *args):
        config.num_labels = self.num_labels
        model = OpenAIGPTForSequenceClassification(config)
        model.to(torch_device)
        model.eval()

        sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        result = model(input_ids, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            head_mask,
            token_type_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
            "head_mask": head_mask,
        }
        return config, inputs_dict
@require_torch
class OpenAIGPTModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (OpenAIGPTModel, OpenAIGPTLMHeadModel, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification)
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = (
        (OpenAIGPTLMHeadModel,) if is_torch_available() else ()
    )  # TODO (PVP): Add Double HeadsModel when generate() function is changed accordingly
    pipeline_model_mapping = (
        {
            "feature-extraction": OpenAIGPTModel,
            "text-classification": OpenAIGPTForSequenceClassification,
            "text-generation": OpenAIGPTLMHeadModel,
            "zero-shot": OpenAIGPTForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
            # Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
            # `OpenAIGPTConfig` was never used in pipeline tests, either because of a missing checkpoint or because a
            # tiny config could not be created.
            return True

        return False
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class.__name__ == "OpenAIGPTDoubleHeadsModel":
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.num_choices, self.model_tester.seq_length), dtype=torch.long, device=torch_device, )
                inputs_dict["input_ids"] = inputs_dict["labels"]
                inputs_dict["token_type_ids"] = inputs_dict["labels"]
                inputs_dict["mc_token_ids"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.num_choices), dtype=torch.long, device=torch_device, )
                inputs_dict["mc_labels"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device)
        return inputs_dict
    def setUp(self):
        self.model_tester = OpenAIGPTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=OpenAIGPTConfig, n_embd=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_openai_gpt_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_openai_gpt_model(*config_and_inputs)

    def test_openai_gpt_lm_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_lm_head_model(*config_and_inputs)

    def test_openai_gpt_double_lm_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_double_lm_head_model(*config_and_inputs)

    def test_openai_gpt_classification_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_openai_gpt_for_sequence_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = OpenAIGPTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
class OPENAIGPTModelLanguageGenerationTest(unittest.TestCase):
    @slow
    def test_lm_generate_openai_gpt(self):
        model = OpenAIGPTLMHeadModel.from_pretrained("openai-gpt")
        model.to(torch_device)
        input_ids = torch.tensor([[481, 4735, 544]], dtype=torch.long, device=torch_device)  # the president is
        expected_output_ids = [
            481, 4735, 544, 246, 963, 870, 762, 239, 244, 40477, 244, 249, 719, 881, 487, 544, 240, 244, 603, 481,
        ]  # the president is a very good man. " \n " i'm sure he is, " said the
        output_ids = model.generate(input_ids, do_sample=False)
        self.assertListEqual(output_ids[0].tolist(), expected_output_ids)
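# Minimal generation sketch mirroring the slow test above (downloads the
# public `openai-gpt` checkpoint):
#
#     from transformers import OpenAIGPTLMHeadModel, OpenAIGPTTokenizer
#
#     tokenizer = OpenAIGPTTokenizer.from_pretrained("openai-gpt")
#     model = OpenAIGPTLMHeadModel.from_pretrained("openai-gpt")
#     input_ids = tokenizer("the president is", return_tensors="pt").input_ids
#     output_ids = model.generate(input_ids, do_sample=False)
#     print(tokenizer.decode(output_ids[0]))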
| 356
| 1
|
"""simple docstring"""
import logging
import os
from logging import (
CRITICAL, # NOQA
DEBUG, # NOQA
ERROR, # NOQA
FATAL, # NOQA
INFO, # NOQA
NOTSET, # NOQA
WARN, # NOQA
WARNING, # NOQA
)
from typing import Optional
from tqdm import auto as tqdm_lib
log_levels = {
    "debug": logging.DEBUG,
    "info": logging.INFO,
    "warning": logging.WARNING,
    "error": logging.ERROR,
    "critical": logging.CRITICAL,
}

_default_log_level = logging.WARNING
def _get_default_logging_level():
    # If the DATASETS_VERBOSITY env var is set to a valid choice, use it as the default level.
    env_level_str = os.getenv("DATASETS_VERBOSITY", None)
    if env_level_str:
        if env_level_str in log_levels:
            return log_levels[env_level_str]
        else:
            logging.getLogger().warning(
                f"Unknown option DATASETS_VERBOSITY={env_level_str}, "
                f"has to be one of: {', '.join(log_levels.keys())}"
            )
    return _default_log_level
def _get_library_name() -> str:
    return __name__.split(".")[0]


def _get_library_root_logger() -> logging.Logger:
    return logging.getLogger(_get_library_name())


def _configure_library_root_logger() -> None:
    # Apply our default configuration to the library root logger.
    library_root_logger = _get_library_root_logger()
    library_root_logger.setLevel(_get_default_logging_level())


def _reset_library_root_logger() -> None:
    library_root_logger = _get_library_root_logger()
    library_root_logger.setLevel(logging.NOTSET)


def get_logger(name: Optional[str] = None) -> logging.Logger:
    if name is None:
        name = _get_library_name()
    return logging.getLogger(name)


def get_verbosity() -> int:
    return _get_library_root_logger().getEffectiveLevel()


def set_verbosity(verbosity: int) -> None:
    _get_library_root_logger().setLevel(verbosity)


def set_verbosity_info():
    return set_verbosity(INFO)


def set_verbosity_warning():
    return set_verbosity(WARNING)


def set_verbosity_debug():
    return set_verbosity(DEBUG)


def set_verbosity_error():
    return set_verbosity(ERROR)


def disable_propagation() -> None:
    _get_library_root_logger().propagate = False


def enable_propagation() -> None:
    _get_library_root_logger().propagate = True
# Configure the library root logger at the module level (singleton-like)
_configure_library_root_logger()
class EmptyTqdm:
    """Dummy tqdm which doesn't do anything."""

    def __init__(self, *args, **kwargs):  # pylint: disable=unused-argument
        self._iterator = args[0] if args else None

    def __iter__(self):
        return iter(self._iterator)

    def __getattr__(self, _):
        """Return empty function."""

        def empty_fn(*args, **kwargs):  # pylint: disable=unused-argument
            return

        return empty_fn

    def __enter__(self):
        return self

    def __exit__(self, type_, value, traceback):
        return
_tqdm_active = True
class _tqdm_cls:
    def __call__(self, *args, disable=False, **kwargs):
        if _tqdm_active and not disable:
            return tqdm_lib.tqdm(*args, **kwargs)
        else:
            return EmptyTqdm(*args, **kwargs)

    def set_lock(self, *args, **kwargs):
        self._lock = None
        if _tqdm_active:
            return tqdm_lib.tqdm.set_lock(*args, **kwargs)

    def get_lock(self):
        if _tqdm_active:
            return tqdm_lib.tqdm.get_lock()
tqdm = _tqdm_cls()
def is_progress_bar_enabled() -> bool:
    global _tqdm_active
    return bool(_tqdm_active)


def enable_progress_bar():
    global _tqdm_active
    _tqdm_active = True


def disable_progress_bar():
    global _tqdm_active
    _tqdm_active = False
| 700
|
"""simple docstring"""
import inspect
import re
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
_A = """src/transformers"""
# This is to make sure the transformers module imported is the one in the repo.
transformers = direct_transformers_import(PATH_TO_TRANSFORMERS)

CONFIG_MAPPING = transformers.models.auto.configuration_auto.CONFIG_MAPPING
# Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`.
# For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)`
_re_checkpoint = re.compile(r"\[(.+?)\]\((https://huggingface\.co/.+?)\)")
CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK = {
"""DecisionTransformerConfig""",
"""EncoderDecoderConfig""",
"""MusicgenConfig""",
"""RagConfig""",
"""SpeechEncoderDecoderConfig""",
"""TimmBackboneConfig""",
"""VisionEncoderDecoderConfig""",
"""VisionTextDualEncoderConfig""",
"""LlamaConfig""",
}
def get_checkpoint_from_config_class(config_class):
    checkpoint = None

    # source code of `config_class`
    config_source = inspect.getsource(config_class)
    checkpoints = _re_checkpoint.findall(config_source)

    # Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link.
    # For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')`
    for ckpt_name, ckpt_link in checkpoints:
        # allow the link to end with `/`
        if ckpt_link.endswith("/"):
            ckpt_link = ckpt_link[:-1]

        # verify the checkpoint name corresponds to the checkpoint link
        ckpt_link_from_name = f"https://huggingface.co/{ckpt_name}"
        if ckpt_link == ckpt_link_from_name:
            checkpoint = ckpt_name
            break

    return checkpoint
def check_config_docstrings_have_checkpoints():
    configs_without_checkpoint = []

    for config_class in list(CONFIG_MAPPING.values()):
        # Skip deprecated models
        if "models.deprecated" in config_class.__module__:
            continue
        checkpoint = get_checkpoint_from_config_class(config_class)

        name = config_class.__name__
        if checkpoint is None and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK:
            configs_without_checkpoint.append(name)

    if len(configs_without_checkpoint) > 0:
        message = "\n".join(sorted(configs_without_checkpoint))
        raise ValueError(f"The following configurations don't contain any valid checkpoint:\n{message}")
if __name__ == "__main__":
check_config_docstrings_have_checkpoints()
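# Illustration of what `_re_checkpoint` extracts from a docstring-style string:
#
#     doc = "[bert-base-uncased](https://huggingface.co/bert-base-uncased)"
#     _re_checkpoint.findall(doc)
#     # -> [('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')]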
| 507
| 0
|
def solution(limit: int = 1_000_000) -> int:
    primes = set(range(3, limit, 2))
    primes.add(2)
    for p in range(3, limit, 2):
        if p not in primes:
            continue
        primes.difference_update(set(range(p * p, limit, p)))
    phi = [float(n) for n in range(limit + 1)]
    for p in primes:
        for n in range(p, limit + 1, p):
            phi[n] *= 1 - 1 / p
    return int(sum(phi[2:]))
if __name__ == "__main__":
print(F'{solution() = }')
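# Small-limit sanity check: for limit=10 the sieve leaves phi(2..10) =
# 1, 2, 2, 4, 2, 6, 4, 6, 4, which sums to 31, so solution(10) returns 31
# (floating-point rounding aside).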
| 100
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
if is_sentencepiece_available():
    from ..t5.tokenization_t5 import T5Tokenizer
else:
    from ...utils.dummy_sentencepiece_objects import T5Tokenizer

MT5Tokenizer = T5Tokenizer

if is_tokenizers_available():
    from ..t5.tokenization_t5_fast import T5TokenizerFast
else:
    from ...utils.dummy_tokenizers_objects import T5TokenizerFast

MT5TokenizerFast = T5TokenizerFast

_import_structure = {"configuration_mt5": ["MT5Config", "MT5OnnxConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mt5"] = [
        "MT5EncoderModel",
        "MT5ForConditionalGeneration",
        "MT5ForQuestionAnswering",
        "MT5Model",
        "MT5PreTrainedModel",
        "MT5Stack",
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCamelCase = ["""TFMT5EncoderModel""", """TFMT5ForConditionalGeneration""", """TFMT5Model"""]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCamelCase = ["""FlaxMT5EncoderModel""", """FlaxMT5ForConditionalGeneration""", """FlaxMT5Model"""]
if TYPE_CHECKING:
    from .configuration_mt5 import MT5Config, MT5OnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_mt5 import (
            MT5EncoderModel,
            MT5ForConditionalGeneration,
            MT5ForQuestionAnswering,
            MT5Model,
            MT5PreTrainedModel,
            MT5Stack,
        )
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_tf_mt5 import TFMT5EncoderModel, TFMT5ForConditionalGeneration, TFMT5Model
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_flax_mt5 import FlaxMT5EncoderModel, FlaxMT5ForConditionalGeneration, FlaxMT5Model
else:
import sys
    sys.modules[__name__] = _LazyModule(
        __name__,
        globals()["__file__"],
        _import_structure,
        extra_objects={"MT5Tokenizer": MT5Tokenizer, "MT5TokenizerFast": MT5TokenizerFast},
        module_spec=__spec__,
    )
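# Consumer-side sketch: with the lazy module above in place, these resolve on
# first access (assumes `torch` and `sentencepiece` are installed):
#
#     from transformers import MT5ForConditionalGeneration, MT5Tokenizer
#
#     tokenizer = MT5Tokenizer.from_pretrained("google/mt5-small")
#     model = MT5ForConditionalGeneration.from_pretrained("google/mt5-small")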
| 453
| 0
|
import json
import os
from pathlib import Path
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple, Union
import sentencepiece
from ...tokenization_utils import BatchEncoding, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {
"""vocab_file""": """vocab.json""",
"""spm_file""": """sentencepiece.bpe.model""",
"""tokenizer_config_file""": """tokenizer_config.json""",
}
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""facebook/m2m100_418M""": """https://huggingface.co/facebook/m2m100_418M/resolve/main/vocab.json""",
"""facebook/m2m100_1.2B""": """https://huggingface.co/facebook/m2m100_1.2B/resolve/main/vocab.json""",
},
"""spm_file""": {
"""facebook/m2m100_418M""": """https://huggingface.co/facebook/m2m100_418M/resolve/main/sentencepiece.bpe.model""",
"""facebook/m2m100_1.2B""": """https://huggingface.co/facebook/m2m100_1.2B/resolve/main/sentencepiece.bpe.model""",
},
"""tokenizer_config_file""": {
"""facebook/m2m100_418M""": """https://huggingface.co/facebook/m2m100_418M/resolve/main/tokenizer_config.json""",
"""facebook/m2m100_1.2B""": """https://huggingface.co/facebook/m2m100_1.2B/resolve/main/tokenizer_config.json""",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""facebook/m2m100_418M""": 1_0_2_4,
}
# fmt: off
FAIRSEQ_LANGUAGE_CODES = {
"""m2m100""": ["""af""", """am""", """ar""", """ast""", """az""", """ba""", """be""", """bg""", """bn""", """br""", """bs""", """ca""", """ceb""", """cs""", """cy""", """da""", """de""", """el""", """en""", """es""", """et""", """fa""", """ff""", """fi""", """fr""", """fy""", """ga""", """gd""", """gl""", """gu""", """ha""", """he""", """hi""", """hr""", """ht""", """hu""", """hy""", """id""", """ig""", """ilo""", """is""", """it""", """ja""", """jv""", """ka""", """kk""", """km""", """kn""", """ko""", """lb""", """lg""", """ln""", """lo""", """lt""", """lv""", """mg""", """mk""", """ml""", """mn""", """mr""", """ms""", """my""", """ne""", """nl""", """no""", """ns""", """oc""", """or""", """pa""", """pl""", """ps""", """pt""", """ro""", """ru""", """sd""", """si""", """sk""", """sl""", """so""", """sq""", """sr""", """ss""", """su""", """sv""", """sw""", """ta""", """th""", """tl""", """tn""", """tr""", """uk""", """ur""", """uz""", """vi""", """wo""", """xh""", """yi""", """yo""", """zh""", """zu"""],
"""wmt21""": ["""en""", """ha""", """is""", """ja""", """cs""", """ru""", """zh""", """de"""]
}
class M2M100Tokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]

    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []
def __init__( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__=None , lowerCAmelCase__=None , lowerCAmelCase__="<s>" , lowerCAmelCase__="</s>" , lowerCAmelCase__="</s>" , lowerCAmelCase__="<pad>" , lowerCAmelCase__="<unk>" , lowerCAmelCase__="m2m100" , lowerCAmelCase__ = None , lowerCAmelCase__=8 , **lowerCAmelCase__ , ) -> None:
'''simple docstring'''
__lowercase = {} if sp_model_kwargs is None else sp_model_kwargs
__lowercase = language_codes
__lowercase = FAIRSEQ_LANGUAGE_CODES[language_codes]
__lowercase = {lang_code: F"__{lang_code}__" for lang_code in fairseq_language_code}
__lowercase = kwargs.get('''additional_special_tokens''' , [] )
kwargs["additional_special_tokens"] += [
self.get_lang_token(lowerCAmelCase__ )
for lang_code in fairseq_language_code
if self.get_lang_token(lowerCAmelCase__ ) not in kwargs["additional_special_tokens"]
]
super().__init__(
src_lang=lowerCAmelCase__ , tgt_lang=lowerCAmelCase__ , bos_token=lowerCAmelCase__ , eos_token=lowerCAmelCase__ , sep_token=lowerCAmelCase__ , unk_token=lowerCAmelCase__ , pad_token=lowerCAmelCase__ , language_codes=lowerCAmelCase__ , sp_model_kwargs=self.sp_model_kwargs , num_madeup_words=lowerCAmelCase__ , **lowerCAmelCase__ , )
__lowercase = vocab_file
__lowercase = load_json(lowerCAmelCase__ )
__lowercase = {v: k for k, v in self.encoder.items()}
__lowercase = spm_file
__lowercase = load_spm(lowerCAmelCase__ , self.sp_model_kwargs )
__lowercase = len(self.encoder )
__lowercase = {
self.get_lang_token(lowerCAmelCase__ ): self.encoder_size + i for i, lang_code in enumerate(lowerCAmelCase__ )
}
__lowercase = {lang_code: self.encoder_size + i for i, lang_code in enumerate(lowerCAmelCase__ )}
__lowercase = {v: k for k, v in self.lang_token_to_id.items()}
__lowercase = src_lang if src_lang is not None else '''en'''
__lowercase = tgt_lang
__lowercase = self.get_lang_id(self._src_lang )
self.set_src_lang_special_tokens(self._src_lang )
__lowercase = num_madeup_words
@property
def _SCREAMING_SNAKE_CASE ( self ) -> int:
'''simple docstring'''
return len(self.encoder ) + len(self.lang_token_to_id )
@property
def _SCREAMING_SNAKE_CASE ( self ) -> str:
'''simple docstring'''
return self._src_lang
@src_lang.setter
def _SCREAMING_SNAKE_CASE ( self , lowerCAmelCase__ ) -> None:
'''simple docstring'''
__lowercase = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def _SCREAMING_SNAKE_CASE ( self , lowerCAmelCase__ ) -> List[str]:
'''simple docstring'''
return self.sp_model.encode(lowerCAmelCase__ , out_type=lowerCAmelCase__ )
def _SCREAMING_SNAKE_CASE ( self , lowerCAmelCase__ ) -> List[str]:
'''simple docstring'''
if token in self.lang_token_to_id:
return self.lang_token_to_id[token]
return self.encoder.get(lowerCAmelCase__ , self.encoder[self.unk_token] )
def _SCREAMING_SNAKE_CASE ( self , lowerCAmelCase__ ) -> str:
'''simple docstring'''
if index in self.id_to_lang_token:
return self.id_to_lang_token[index]
return self.decoder.get(lowerCAmelCase__ , self.unk_token )
def _SCREAMING_SNAKE_CASE ( self , lowerCAmelCase__ ) -> Tuple:
'''simple docstring'''
__lowercase = []
__lowercase = ''''''
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
out_string += self.sp_model.decode(lowerCAmelCase__ ) + token
__lowercase = []
else:
current_sub_tokens.append(lowerCAmelCase__ )
out_string += self.sp_model.decode(lowerCAmelCase__ )
return out_string.strip()
def _SCREAMING_SNAKE_CASE ( self , lowerCAmelCase__ , lowerCAmelCase__ = None , lowerCAmelCase__ = False ) -> List[int]:
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowerCAmelCase__ , token_ids_a=lowerCAmelCase__ , already_has_special_tokens=lowerCAmelCase__ )
__lowercase = [1] * len(self.prefix_tokens )
__lowercase = [1] * len(self.suffix_tokens )
if token_ids_a is None:
return prefix_ones + ([0] * len(lowerCAmelCase__ )) + suffix_ones
return prefix_ones + ([0] * len(lowerCAmelCase__ )) + ([0] * len(lowerCAmelCase__ )) + suffix_ones
def _SCREAMING_SNAKE_CASE ( self , lowerCAmelCase__ , lowerCAmelCase__ = None ) -> List[int]:
'''simple docstring'''
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def _SCREAMING_SNAKE_CASE ( self ) -> Dict:
'''simple docstring'''
__lowercase = {self.convert_ids_to_tokens(lowerCAmelCase__ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self ) -> Dict:
'''simple docstring'''
__lowercase = self.__dict__.copy()
__lowercase = None
return state
def __setstate__( self , lowerCAmelCase__ ) -> None:
'''simple docstring'''
__lowercase = d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs''' ):
__lowercase = {}
__lowercase = load_spm(self.spm_file , self.sp_model_kwargs )
def _SCREAMING_SNAKE_CASE ( self , lowerCAmelCase__ , lowerCAmelCase__ = None ) -> Tuple[str]:
'''simple docstring'''
__lowercase = Path(lowerCAmelCase__ )
if not save_dir.is_dir():
raise OSError(F"{save_directory} should be a directory" )
__lowercase = save_dir / (
(filename_prefix + '''-''' if filename_prefix else '''''') + self.vocab_files_names['''vocab_file''']
)
__lowercase = save_dir / (
(filename_prefix + '''-''' if filename_prefix else '''''') + self.vocab_files_names['''spm_file''']
)
save_json(self.encoder , lowerCAmelCase__ )
if os.path.abspath(self.spm_file ) != os.path.abspath(lowerCAmelCase__ ) and os.path.isfile(self.spm_file ):
copyfile(self.spm_file , lowerCAmelCase__ )
elif not os.path.isfile(self.spm_file ):
with open(lowerCAmelCase__ , '''wb''' ) as fi:
__lowercase = self.sp_model.serialized_model_proto()
fi.write(lowerCAmelCase__ )
return (str(lowerCAmelCase__ ), str(lowerCAmelCase__ ))
def _SCREAMING_SNAKE_CASE ( self , lowerCAmelCase__ , lowerCAmelCase__ = "en" , lowerCAmelCase__ = None , lowerCAmelCase__ = "ro" , **lowerCAmelCase__ , ) -> BatchEncoding:
'''simple docstring'''
__lowercase = src_lang
__lowercase = tgt_lang
self.set_src_lang_special_tokens(self.src_lang )
return super().prepare_seqaseq_batch(lowerCAmelCase__ , lowerCAmelCase__ , **lowerCAmelCase__ )
def _SCREAMING_SNAKE_CASE ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , **lowerCAmelCase__ ) -> str:
'''simple docstring'''
if src_lang is None or tgt_lang is None:
raise ValueError('''Translation requires a `src_lang` and a `tgt_lang` for this model''' )
__lowercase = src_lang
__lowercase = self(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ , **lowerCAmelCase__ )
__lowercase = self.get_lang_id(lowerCAmelCase__ )
__lowercase = tgt_lang_id
return inputs
def _SCREAMING_SNAKE_CASE ( self ) -> str:
'''simple docstring'''
self.set_src_lang_special_tokens(self.src_lang )
def _SCREAMING_SNAKE_CASE ( self ) -> str:
'''simple docstring'''
self.set_tgt_lang_special_tokens(self.tgt_lang )
def _SCREAMING_SNAKE_CASE ( self , lowerCAmelCase__ ) -> None:
'''simple docstring'''
__lowercase = self.get_lang_token(lowerCAmelCase__ )
__lowercase = self.lang_token_to_id[lang_token]
__lowercase = [self.cur_lang_id]
__lowercase = [self.eos_token_id]
def _SCREAMING_SNAKE_CASE ( self , lowerCAmelCase__ ) -> None:
'''simple docstring'''
__lowercase = self.get_lang_token(lowerCAmelCase__ )
__lowercase = self.lang_token_to_id[lang_token]
__lowercase = [self.cur_lang_id]
__lowercase = [self.eos_token_id]
def _SCREAMING_SNAKE_CASE ( self , lowerCAmelCase__ ) -> str:
'''simple docstring'''
return self.lang_code_to_token[lang]
def _SCREAMING_SNAKE_CASE ( self , lowerCAmelCase__ ) -> int:
'''simple docstring'''
__lowercase = self.get_lang_token(lowerCAmelCase__ )
return self.lang_token_to_id[lang_token]
def load_spm(path: str, sp_model_kwargs) -> sentencepiece.SentencePieceProcessor:
    spm = sentencepiece.SentencePieceProcessor(**sp_model_kwargs)
    spm.Load(str(path))
    return spm


def load_json(path: str):
    with open(path, "r") as f:
        return json.load(f)


def save_json(data, path: str) -> None:
    with open(path, "w") as f:
        json.dump(data, f, indent=2)
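# Example usage of the helpers above (file names are illustrative):
#
#   sp_model = load_spm("sentencepiece.bpe.model", sp_model_kwargs={})
#   vocab = load_json("vocab.json")
#   save_json(vocab, "vocab.copy.json")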
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, MBartTokenizer, MBartTokenizerFast, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")

if is_torch_available():
    from transformers.models.mbart.modeling_mbart import shift_tokens_right

EN_CODE = 250004
RO_CODE = 250020
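# For reference: MBart's shift_tokens_right wraps the final language code token
# around to position 0 when building decoder inputs, e.g. (illustrative ids):
#   labels:            [tok_a, tok_b, 2 (eos), RO_CODE]
#   decoder_input_ids: [RO_CODE, tok_a, tok_b, 2]
# The integration tests below assert exactly this layout.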
@require_sentencepiece
@require_tokenizers
class MBartTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = MBartTokenizer
    rust_tokenizer_class = MBartTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = MBartTokenizer(SAMPLE_VOCAB, keep_accents=True)
tokenizer.save_pretrained(self.tmpdirname )
    def test_full_tokenizer(self):
        tokenizer = MBartTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]],
        )
        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''é''',
'''.''',
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, 2, 4]
# ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^
] , )
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''<unk>''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''<unk>''',
'''.''',
] , )
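        # Note: the raw SentencePiece ids above are shifted by tokenizer.fairseq_offset
        # (the slots reserved for fairseq special tokens) before comparison, and
        # out-of-vocab pieces such as "9" and "é" map to the unknown token.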
    def test_save_pretrained(self):
        if not self.test_slow_tokenizer:
            # as we don't have a slow version, we can't compare the outputs between slow and fast versions
            return

        self.tokenizers_list[0] = (self.rust_tokenizer_class, "hf-internal-testing/tiny-random-mbart", {})
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it save with the same files + the tokenizer.json file for the fast one
                self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files))
                tokenizer_r_files = tuple(f for f in tokenizer_r_files if "tokenizer.json" not in f)
                self.assertSequenceEqual(tokenizer_r_files, tokenizer_p_files)

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))
                # self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
                # self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))

                shutil.rmtree(tmpdirname2)

                # Save tokenizer rust, legacy_format=True
                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2, legacy_format=True)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it save with the same files
                self.assertSequenceEqual(tokenizer_r_files, tokenizer_p_files)

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))

                shutil.rmtree(tmpdirname2)

                # Save tokenizer rust, legacy_format=False
                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2, legacy_format=False)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it saved the tokenizer.json file
                self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files))

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))

                shutil.rmtree(tmpdirname2)
@require_torch
@require_sentencepiece
@require_tokenizers
class MBartEnroIntegrationTest(unittest.TestCase):
    checkpoint_name = "facebook/mbart-large-en-ro"
    src_text = [
''' UN Chief Says There Is No Military Solution in Syria''',
''' Secretary-General Ban Ki-moon says his response to Russia\'s stepped up military support for Syria is that "there is no military solution" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.''',
]
    tgt_text = [
'''Şeful ONU declară că nu există o soluţie militară în Siria''',
'''Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei'''
''' pentru Siria este că "nu există o soluţie militară" la conflictul de aproape cinci ani şi că noi arme nu vor'''
''' face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.''',
]
    expected_src_tokens = [8274, 127873, 25916, 7, 8622, 2071, 438, 67485, 53, 187895, 23, 51712, 2, EN_CODE]
    @classmethod
    def setUpClass(cls):
        cls.tokenizer = MBartTokenizer.from_pretrained(
            cls.checkpoint_name, src_lang="en_XX", tgt_lang="ro_RO"
        )
        cls.pad_token_id = 1
        return cls
    def test_language_codes(self):
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["ar_AR"], 250001)
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["en_EN"], 250004)
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["ro_RO"], 250020)

    def test_enro_tokenizer_batch_encode_plus(self):
        ids = self.tokenizer.batch_encode_plus(self.src_text).input_ids[0]
        self.assertListEqual(self.expected_src_tokens, ids)

    def test_enro_tokenizer_decode_ignores_language_codes(self):
        self.assertIn(RO_CODE, self.tokenizer.all_special_ids)
        generated_ids = [RO_CODE, 884, 9019, 96, 9, 916, 86792, 36, 18743, 15596, 5, 2]
        result = self.tokenizer.decode(generated_ids, skip_special_tokens=True)
        expected_romanian = self.tokenizer.decode(generated_ids[1:], skip_special_tokens=True)
        self.assertEqual(result, expected_romanian)
        self.assertNotIn(self.tokenizer.eos_token, result)

    def test_enro_tokenizer_truncation(self):
        src_text = ["this is gunna be a long sentence " * 20]
        assert isinstance(src_text[0], str)
        desired_max_length = 10
        ids = self.tokenizer(src_text, max_length=desired_max_length, truncation=True).input_ids[0]
        self.assertEqual(ids[-2], 2)
        self.assertEqual(ids[-1], EN_CODE)
        self.assertEqual(len(ids), desired_max_length)

    def test_mask_token(self):
        self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["<mask>", "ar_AR"]), [250026, 250001])
    def test_special_tokens_unaffacted_by_save_load(self):
        tmpdirname = tempfile.mkdtemp()
        original_special_tokens = self.tokenizer.fairseq_tokens_to_ids
        self.tokenizer.save_pretrained(tmpdirname)
        new_tok = MBartTokenizer.from_pretrained(tmpdirname)
        self.assertDictEqual(new_tok.fairseq_tokens_to_ids, original_special_tokens)

    @require_torch
    def test_batch_fairseq_parity(self):
        batch = self.tokenizer(self.src_text, text_target=self.tgt_text, padding=True, return_tensors="pt")
        batch["decoder_input_ids"] = shift_tokens_right(batch["labels"], self.tokenizer.pad_token_id)

        # fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
        assert batch.input_ids[1][-2:].tolist() == [2, EN_CODE]
        assert batch.decoder_input_ids[1][0].tolist() == RO_CODE
        assert batch.decoder_input_ids[1][-1] == 2
        assert batch.labels[1][-2:].tolist() == [2, RO_CODE]

    @require_torch
    def test_enro_tokenizer_prepare_batch(self):
        batch = self.tokenizer(
            self.src_text,
            text_target=self.tgt_text,
            padding=True,
            truncation=True,
            max_length=len(self.expected_src_tokens),
            return_tensors="pt",
        )
        batch["decoder_input_ids"] = shift_tokens_right(batch["labels"], self.tokenizer.pad_token_id)

        self.assertIsInstance(batch, BatchEncoding)

        self.assertEqual((2, 14), batch.input_ids.shape)
        self.assertEqual((2, 14), batch.attention_mask.shape)
        result = batch.input_ids.tolist()[0]
        self.assertListEqual(self.expected_src_tokens, result)
        self.assertEqual(2, batch.decoder_input_ids[0, -1])  # EOS
        # Test that special tokens are reset
        self.assertEqual(self.tokenizer.prefix_tokens, [])
        self.assertEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id, EN_CODE])

    def test_seq2seq_max_length(self):
        batch = self.tokenizer(self.src_text, padding=True, truncation=True, max_length=3, return_tensors="pt")
        targets = self.tokenizer(
            text_target=self.tgt_text, padding=True, truncation=True, max_length=10, return_tensors="pt"
        )
        labels = targets["input_ids"]
        batch["decoder_input_ids"] = shift_tokens_right(labels, self.tokenizer.pad_token_id)

        self.assertEqual(batch.input_ids.shape[1], 3)
        self.assertEqual(batch.decoder_input_ids.shape[1], 10)

    @require_torch
    def test_tokenizer_translation(self):
        inputs = self.tokenizer._build_translation_inputs(
            "A test", return_tensors="pt", src_lang="en_XX", tgt_lang="ar_AR"
        )

        self.assertEqual(
            nested_simplify(inputs),
            {
                # A, test, EOS, en_XX
                "input_ids": [[62, 3034, 2, 250004]],
                "attention_mask": [[1, 1, 1, 1]],
                # ar_AR
                "forced_bos_token_id": 250001,
            },
        )
"""simple docstring"""
import math
import os
from copy import deepcopy
import datasets
import evaluate
import torch
import transformers
from datasets import load_dataset
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer
from accelerate import Accelerator
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import is_tpu_available, set_seed
SCREAMING_SNAKE_CASE_ = '''true'''
def get_basic_setup(accelerator, num_samples=82, batch_size=16):
    "Returns everything needed to perform basic training"
    set_seed(42)
    model = RegressionModel()
    ddp_model = deepcopy(model)
    dset = RegressionDataset(length=num_samples)
    dataloader = DataLoader(dset, batch_size=batch_size)
    model.to(accelerator.device)
    ddp_model, dataloader = accelerator.prepare(ddp_model, dataloader)
    return model, ddp_model, dataloader


def get_dataloader(accelerator: Accelerator, use_longest=False):
    tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/mrpc-bert-base-cased")
    dataset = load_dataset("glue", "mrpc", split="validation")

    def tokenize_function(examples):
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    with accelerator.main_process_first():
        tokenized_datasets = dataset.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        if use_longest:
            return tokenizer.pad(examples, padding="longest", return_tensors="pt")
        return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")

    return DataLoader(tokenized_datasets, shuffle=False, collate_fn=collate_fn, batch_size=16)
def get_mrpc_setup(dispatch_batches, split_batches):
    accelerator = Accelerator(dispatch_batches=dispatch_batches, split_batches=split_batches)
    dataloader = get_dataloader(accelerator, not dispatch_batches)
    model = AutoModelForSequenceClassification.from_pretrained(
        "hf-internal-testing/mrpc-bert-base-cased", return_dict=True
    )
    ddp_model, ddp_dataloader = accelerator.prepare(model, dataloader)
    return {"ddp": [ddp_model, ddp_dataloader, "cuda:0"], "no": [model, dataloader, accelerator.device]}, accelerator
def generate_predictions(model, dataloader, accelerator):
    logits_and_targets = []
    for batch in dataloader:
        input, target = batch.values()
        with torch.no_grad():
            logit = model(input)
        logit, target = accelerator.gather_for_metrics((logit, target))
        logits_and_targets.append((logit, target))
    logits, targs = [], []
    for logit, targ in logits_and_targets:
        logits.append(logit)
        targs.append(targ)
    logits, targs = torch.cat(logits), torch.cat(targs)
    return logits, targs
def test_torch_metrics(accelerator, num_samples=82, dispatch_batches=False, split_batches=False, batch_size=16):
    model, ddp_model, dataloader = get_basic_setup(accelerator, num_samples, batch_size)
    logits, _ = generate_predictions(ddp_model, dataloader, accelerator)
    assert (
        len(logits) == num_samples
    ), f"Unexpected number of inputs:\n    Expected: {num_samples}\n    Actual: {len(logits)}"
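# Note: gather_for_metrics() (used in generate_predictions above) drops the
# duplicate samples added to pad the last batch across processes, which is why
# exactly `num_samples` predictions are expected. A minimal sketch (illustrative):
#
#   accelerator = Accelerator()
#   model, ddp_model, dataloader = get_basic_setup(accelerator)
#   logits, targs = generate_predictions(ddp_model, dataloader, accelerator)
#   assert len(logits) == 82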
def test_mrpc(dispatch_batches: bool = False, split_batches: bool = False):
    metric = evaluate.load("glue", "mrpc")
    setup, accelerator = get_mrpc_setup(dispatch_batches, split_batches)
    # First do baseline
    model, dataloader, device = setup["no"]
    model.to(device)
    model.eval()
    for batch in dataloader:
        batch.to(device)
        with torch.inference_mode():
            outputs = model(**batch)
        preds = outputs.logits.argmax(dim=-1)
        metric.add_batch(predictions=preds, references=batch["labels"])
    baseline = metric.compute()

    # Then do distributed
    model, dataloader, device = setup["ddp"]
    model.eval()
    for batch in dataloader:
        with torch.inference_mode():
            outputs = model(**batch)
        preds = outputs.logits.argmax(dim=-1)
        references = batch["labels"]
        preds, references = accelerator.gather_for_metrics((preds, references))
        metric.add_batch(predictions=preds, references=references)
    distributed = metric.compute()

    for key in "accuracy f1".split():
        assert math.isclose(
            baseline[key], distributed[key]
        ), f"Baseline and Distributed are not the same for key {key}:\n\tBaseline: {baseline[key]}\n\tDistributed: {distributed[key]}\n"
def main():
    accelerator = Accelerator(split_batches=False, dispatch_batches=False)
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_warning()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
# These are a bit slower so they should only be ran on the GPU or TPU
if torch.cuda.is_available() or is_tpu_available():
if accelerator.is_local_main_process:
print("""**Testing gather_for_metrics**""" )
for split_batches in [True, False]:
for dispatch_batches in [True, False]:
if accelerator.is_local_main_process:
print(f"""With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`""" )
                test_mrpc(dispatch_batches, split_batches)
accelerator.state._reset_state()
if accelerator.is_local_main_process:
print("""**Test torch metrics**""" )
for split_batches in [True, False]:
for dispatch_batches in [True, False]:
            accelerator = Accelerator(split_batches=split_batches, dispatch_batches=dispatch_batches)
if accelerator.is_local_main_process:
print(f"""With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`, length=99""" )
            test_torch_metrics(accelerator, 99)
accelerator.state._reset_state()
if accelerator.is_local_main_process:
print("""**Test last batch is not dropped when perfectly divisible**""" )
    accelerator = Accelerator()
    test_torch_metrics(accelerator, 512)
accelerator.state._reset_state()
def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
"""simple docstring"""
import argparse
import importlib
from pathlib import Path
# Test all the extensions added in the setup
SCREAMING_SNAKE_CASE_ = [
'''kernels/rwkv/wkv_cuda.cu''',
'''kernels/rwkv/wkv_op.cpp''',
'''kernels/deformable_detr/ms_deform_attn.h''',
'''kernels/deformable_detr/cuda/ms_deform_im2col_cuda.cuh''',
'''models/graphormer/algos_graphormer.pyx''',
]
def test_custom_files_are_present(transformers_path):
    # Test all the extensions added in the setup
    for file in FILES_TO_FIND:
        if not (transformers_path / file).exists():
            return False
    return True
if __name__ == "__main__":
SCREAMING_SNAKE_CASE_ = argparse.ArgumentParser()
parser.add_argument('''--check_lib''', action='''store_true''', help='''Whether to check the build or the actual package.''')
SCREAMING_SNAKE_CASE_ = parser.parse_args()
if args.check_lib:
SCREAMING_SNAKE_CASE_ = importlib.import_module('''transformers''')
SCREAMING_SNAKE_CASE_ = Path(transformers_module.__file__).parent
else:
SCREAMING_SNAKE_CASE_ = Path.cwd() / '''build/lib/transformers'''
if not test_custom_files_are_present(transformers_path):
raise ValueError('''The built release does not contain the custom files. Fix this before going further!''')
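# Typical invocations (script path illustrative; run after building a release):
#   python check_build.py              # checks build/lib/transformers
#   python check_build.py --check_lib  # checks the installed transformers package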
import numpy as np
import skfuzzy as fuzz
if __name__ == "__main__":
# Create universe of discourse in Python using linspace ()
X = np.linspace(start=0, stop=75, num=75, endpoint=True, retstep=False)
# Create two fuzzy sets by defining any membership function
# (trapmf(), gbellmf(), gaussmf(), etc).
abc1 = [0, 25, 50]
abc2 = [25, 50, 75]
young = fuzz.membership.trimf(X, abc1)
middle_aged = fuzz.membership.trimf(X, abc2)
# Compute the different operations using inbuilt functions.
one = np.ones(75)
zero = np.zeros((75,))
# 1. Union = max(µA(x), µB(x))
union = fuzz.fuzzy_or(X, young, X, middle_aged)[1]
# 2. Intersection = min(µA(x), µB(x))
intersection = fuzz.fuzzy_and(X, young, X, middle_aged)[1]
# 3. Complement(A) = 1 - µA(x)
complement_a = fuzz.fuzzy_not(young)
# 4. Difference (A/B) = min(µA(x), 1 - µB(x))
difference = fuzz.fuzzy_and(X, young, X, fuzz.fuzzy_not(middle_aged))[1]
# 5. Algebraic Sum = µA(x) + µB(x) - (µA(x) * µB(x))
alg_sum = young + middle_aged - (young * middle_aged)
# 6. Algebraic Product = µA(x) * µB(x)
alg_product = young * middle_aged
# 7. Bounded Sum = min[1, µA(x) + µB(x)]
bdd_sum = fuzz.fuzzy_and(X, one, X, young + middle_aged)[1]
# 8. Bounded Difference = max[0, µA(x) - µB(x)]
bdd_difference = fuzz.fuzzy_or(X, zero, X, young - middle_aged)[1]
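# Quick sanity check of the operations above (illustrative point): where
# µ_young = 0.6 and µ_middle_aged = 0.3, union = max = 0.6, intersection =
# min = 0.3, algebraic sum = 0.6 + 0.3 - 0.18 = 0.72, algebraic product = 0.18,
# bounded sum = min(1, 0.9) = 0.9 and bounded difference = max(0, 0.3) = 0.3.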
# max-min composition
# max-product composition
# Plot each set A, set B and each operation result using plot() and subplot().
from matplotlib import pyplot as plt
plt.figure()
plt.subplot(4, 3, 1)
plt.plot(X, young)
plt.title("""Young""")
plt.grid(True)
plt.subplot(4, 3, 2)
plt.plot(X, middle_aged)
plt.title("""Middle aged""")
plt.grid(True)
plt.subplot(4, 3, 3)
plt.plot(X, union)
plt.title("""union""")
plt.grid(True)
plt.subplot(4, 3, 4)
plt.plot(X, intersection)
plt.title("""intersection""")
plt.grid(True)
plt.subplot(4, 3, 5)
plt.plot(X, complement_a)
plt.title("""complement_a""")
plt.grid(True)
plt.subplot(4, 3, 6)
plt.plot(X, difference)
plt.title("""difference a/b""")
plt.grid(True)
plt.subplot(4, 3, 7)
plt.plot(X, alg_sum)
plt.title("""alg_sum""")
plt.grid(True)
plt.subplot(4, 3, 8)
plt.plot(X, alg_product)
plt.title("""alg_product""")
plt.grid(True)
plt.subplot(4, 3, 9)
plt.plot(X, bdd_sum)
plt.title("""bdd_sum""")
plt.grid(True)
plt.subplot(4, 3, 1_0)
plt.plot(X, bdd_difference)
plt.title("""bdd_difference""")
plt.grid(True)
plt.subplots_adjust(hspace=0.5)
plt.show()
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import datasets
import datasets.config
from .utils import require_beam
class DummyBeamDataset(datasets.BeamBasedBuilder):
    """Dummy beam dataset."""

    def _info(self):
        return datasets.DatasetInfo(
            features=datasets.Features({"content": datasets.Value("string")}),
            supervised_keys=None,
        )

    def _split_generators(self, dl_manager, pipeline):
        return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"examples": get_test_dummy_examples()})]

    def _build_pcollection(self, pipeline, examples):
        import apache_beam as beam

        return pipeline | "Load Examples" >> beam.Create(examples)


class NestedBeamDataset(datasets.BeamBasedBuilder):
    """Dummy beam dataset with nested features."""

    def _info(self):
        return datasets.DatasetInfo(
            features=datasets.Features({"a": datasets.Sequence({"b": datasets.Value("string")})}),
            supervised_keys=None,
        )

    def _split_generators(self, dl_manager, pipeline):
        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"examples": get_test_nested_examples()})
        ]

    def _build_pcollection(self, pipeline, examples):
        import apache_beam as beam

        return pipeline | "Load Examples" >> beam.Create(examples)


def get_test_dummy_examples():
    return [(i, {"content": content}) for i, content in enumerate(["foo", "bar", "foobar"])]


def get_test_nested_examples():
    return [(i, {"a": {"b": [content]}}) for i, content in enumerate(["foo", "bar", "foobar"])]
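# Minimal sketch of driving one of these builders outside the tests
# (cache path illustrative; requires apache_beam):
#
#   builder = DummyBeamDataset(cache_dir="/tmp/beam_cache", beam_runner="DirectRunner")
#   builder.download_and_prepare()
#   dset = builder.as_dataset()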
class BeamBuilderTest(TestCase):
    @require_beam
    def test_download_and_prepare(self):
        expected_num_examples = len(get_test_dummy_examples())
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = DummyBeamDataset(cache_dir=tmp_cache_dir, beam_runner="DirectRunner")
            builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", f"{builder.name}-train.arrow")
                )
            )
            self.assertDictEqual(builder.info.features, datasets.Features({"content": datasets.Value("string")}))
            dset = builder.as_dataset()
            self.assertEqual(dset["train"].num_rows, expected_num_examples)
            self.assertEqual(dset["train"].info.splits["train"].num_examples, expected_num_examples)
            self.assertDictEqual(dset["train"][0], get_test_dummy_examples()[0][1])
            self.assertDictEqual(
                dset["train"][expected_num_examples - 1], get_test_dummy_examples()[expected_num_examples - 1][1]
            )
            self.assertTrue(
                os.path.exists(os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", "dataset_info.json"))
            )
            del dset
    @require_beam
    def test_download_and_prepare_sharded(self):
        import apache_beam as beam

        original_write_parquet = beam.io.parquetio.WriteToParquet

        expected_num_examples = len(get_test_dummy_examples())
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = DummyBeamDataset(cache_dir=tmp_cache_dir, beam_runner="DirectRunner")
            with patch("apache_beam.io.parquetio.WriteToParquet") as write_parquet_mock:
                write_parquet_mock.side_effect = partial(original_write_parquet, num_shards=2)
                builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(
                        tmp_cache_dir, builder.name, "default", "0.0.0", f"{builder.name}-train-00000-of-00002.arrow"
                    )
                )
            )
            self.assertTrue(
                os.path.exists(
                    os.path.join(
                        tmp_cache_dir, builder.name, "default", "0.0.0", f"{builder.name}-train-00001-of-00002.arrow"
                    )
                )
            )
            self.assertDictEqual(builder.info.features, datasets.Features({"content": datasets.Value("string")}))
            dset = builder.as_dataset()
            self.assertEqual(dset["train"].num_rows, expected_num_examples)
            self.assertEqual(dset["train"].info.splits["train"].num_examples, expected_num_examples)
            # Order is not preserved when sharding, so we just check that all the elements are there
            self.assertListEqual(sorted(dset["train"]["content"]), sorted(["foo", "bar", "foobar"]))
            self.assertTrue(
                os.path.exists(os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", "dataset_info.json"))
            )
            del dset
    @require_beam
    def test_no_beam_options(self):
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = DummyBeamDataset(cache_dir=tmp_cache_dir)
            self.assertRaises(datasets.builder.MissingBeamOptions, builder.download_and_prepare)

    @require_beam
    def test_nested_features(self):
        expected_num_examples = len(get_test_nested_examples())
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = NestedBeamDataset(cache_dir=tmp_cache_dir, beam_runner="DirectRunner")
            builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", f"{builder.name}-train.arrow")
                )
            )
            self.assertDictEqual(
                builder.info.features, datasets.Features({"a": datasets.Sequence({"b": datasets.Value("string")})})
            )
            dset = builder.as_dataset()
            self.assertEqual(dset["train"].num_rows, expected_num_examples)
            self.assertEqual(dset["train"].info.splits["train"].num_examples, expected_num_examples)
            self.assertDictEqual(dset["train"][0], get_test_nested_examples()[0][1])
            self.assertDictEqual(
                dset["train"][expected_num_examples - 1], get_test_nested_examples()[expected_num_examples - 1][1]
            )
            self.assertTrue(
                os.path.exists(os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", "dataset_info.json"))
            )
            del dset
'''simple docstring'''
import datasets
from .nmt_bleu import compute_bleu # From: https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py
__lowerCAmelCase = """\
@INPROCEEDINGS{Papineni02bleu:a,
author = {Kishore Papineni and Salim Roukos and Todd Ward and Wei-jing Zhu},
title = {BLEU: a Method for Automatic Evaluation of Machine Translation},
booktitle = {},
year = {2002},
pages = {311--318}
}
@inproceedings{lin-och-2004-orange,
title = \"{ORANGE}: a Method for Evaluating Automatic Evaluation Metrics for Machine Translation\",
author = \"Lin, Chin-Yew and
Och, Franz Josef\",
booktitle = \"{COLING} 2004: Proceedings of the 20th International Conference on Computational Linguistics\",
month = \"aug 23{--}aug 27\",
year = \"2004\",
address = \"Geneva, Switzerland\",
publisher = \"COLING\",
url = \"https://www.aclweb.org/anthology/C04-1072\",
pages = \"501--507\",
}
"""
__lowerCAmelCase = """\
BLEU (bilingual evaluation understudy) is an algorithm for evaluating the quality of text which has been machine-translated from one natural language to another.
Quality is considered to be the correspondence between a machine's output and that of a human: \"the closer a machine translation is to a professional human translation,
the better it is\" – this is the central idea behind BLEU. BLEU was one of the first metrics to claim a high correlation with human judgements of quality, and
remains one of the most popular automated and inexpensive metrics.
Scores are calculated for individual translated segments—generally sentences—by comparing them with a set of good quality reference translations.
Those scores are then averaged over the whole corpus to reach an estimate of the translation's overall quality. Intelligibility or grammatical correctness
are not taken into account[citation needed].
BLEU's output is always a number between 0 and 1. This value indicates how similar the candidate text is to the reference texts, with values closer to 1
representing more similar texts. Few human translations will attain a score of 1, since this would indicate that the candidate is identical to one of the
reference translations. For this reason, it is not necessary to attain a score of 1. Because there are more opportunities to match, adding additional
reference translations will increase the BLEU score.
"""
__lowerCAmelCase = """
Computes BLEU score of translated segments against one or more references.
Args:
predictions: list of translations to score.
Each translation should be tokenized into a list of tokens.
references: list of lists of references for each translation.
Each reference should be tokenized into a list of tokens.
max_order: Maximum n-gram order to use when computing BLEU score.
smooth: Whether or not to apply Lin et al. 2004 smoothing.
Returns:
'bleu': bleu score,
'precisions': geometric mean of n-gram precisions,
'brevity_penalty': brevity penalty,
'length_ratio': ratio of lengths,
'translation_length': translation_length,
'reference_length': reference_length
Examples:
>>> predictions = [
... [\"hello\", \"there\", \"general\", \"kenobi\"], # tokenized prediction of the first sample
... [\"foo\", \"bar\", \"foobar\"] # tokenized prediction of the second sample
... ]
>>> references = [
... [[\"hello\", \"there\", \"general\", \"kenobi\"], [\"hello\", \"there\", \"!\"]], # tokenized references for the first sample (2 references)
... [[\"foo\", \"bar\", \"foobar\"]] # tokenized references for the second sample (1 reference)
... ]
>>> bleu = datasets.load_metric(\"bleu\")
>>> results = bleu.compute(predictions=predictions, references=references)
>>> print(results[\"bleu\"])
1.0
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Bleu(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
description=_DESCRIPTION ,citation=_CITATION ,inputs_description=_KWARGS_DESCRIPTION ,features=datasets.Features(
{
'predictions': datasets.Sequence(datasets.Value('string' ,id='token' ) ,id='sequence' ),
'references': datasets.Sequence(
datasets.Sequence(datasets.Value('string' ,id='token' ) ,id='sequence' ) ,id='references' ),
} ) ,codebase_urls=['https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py'] ,reference_urls=[
'https://en.wikipedia.org/wiki/BLEU',
'https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213',
] ,)
    def _compute(self, predictions, references, max_order=4, smooth=False):
        score = compute_bleu(
            reference_corpus=references, translation_corpus=predictions, max_order=max_order, smooth=smooth
        )
        (bleu, precisions, bp, ratio, translation_length, reference_length) = score
        return {
            "bleu": bleu,
            "precisions": precisions,
            "brevity_penalty": bp,
            "length_ratio": ratio,
            "translation_length": translation_length,
            "reference_length": reference_length,
        }
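# For reference, compute_bleu combines the modified n-gram precisions p_n with a
# brevity penalty: BLEU = BP * exp(sum_n (1/N) * log p_n), where BP = 1 if the
# translation is longer than the reference and exp(1 - r/c) otherwise
# (c = translation_length, r = reference_length), hence the extra keys returned.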
import unittest
from parameterized import parameterized
from transformers import OpenLlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import OpenLlamaForCausalLM, OpenLlamaForSequenceClassification, OpenLlamaModel
class __lowerCAmelCase :
'''simple docstring'''
def __init__( self: Any, lowerCamelCase_: Optional[Any], lowerCamelCase_: List[str]=13, lowerCamelCase_: Optional[Any]=7, lowerCamelCase_: Optional[Any]=True, lowerCamelCase_: Tuple=True, lowerCamelCase_: Any=False, lowerCamelCase_: Union[str, Any]=True, lowerCamelCase_: Optional[Any]=99, lowerCamelCase_: Tuple=32, lowerCamelCase_: Any=5, lowerCamelCase_: Tuple=4, lowerCamelCase_: List[Any]=37, lowerCamelCase_: Union[str, Any]="gelu", lowerCamelCase_: str=0.1, lowerCamelCase_: Union[str, Any]=0.1, lowerCamelCase_: Any=512, lowerCamelCase_: Union[str, Any]=16, lowerCamelCase_: Any=2, lowerCamelCase_: str=0.0_2, lowerCamelCase_: Union[str, Any]=3, lowerCamelCase_: List[str]=4, lowerCamelCase_: Tuple=None, ):
lowercase__ : List[str] = parent
lowercase__ : str = batch_size
lowercase__ : Optional[int] = seq_length
lowercase__ : List[Any] = is_training
lowercase__ : List[str] = use_input_mask
lowercase__ : Optional[int] = use_token_type_ids
lowercase__ : str = use_labels
lowercase__ : Optional[Any] = vocab_size
lowercase__ : Dict = hidden_size
lowercase__ : List[Any] = num_hidden_layers
lowercase__ : Tuple = num_attention_heads
lowercase__ : Dict = intermediate_size
lowercase__ : Tuple = hidden_act
lowercase__ : List[str] = hidden_dropout_prob
lowercase__ : List[str] = attention_probs_dropout_prob
lowercase__ : str = max_position_embeddings
lowercase__ : List[Any] = type_vocab_size
lowercase__ : Tuple = type_sequence_label_size
lowercase__ : Optional[int] = initializer_range
lowercase__ : str = num_labels
lowercase__ : Optional[int] = num_choices
lowercase__ : Dict = scope
def snake_case__( self: Union[str, Any] ):
lowercase__ : Any = ids_tensor([self.batch_size, self.seq_length], self.vocab_size )
lowercase__ : List[Any] = None
if self.use_input_mask:
lowercase__ : List[str] = random_attention_mask([self.batch_size, self.seq_length] )
lowercase__ : Optional[Any] = None
if self.use_token_type_ids:
lowercase__ : Tuple = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size )
lowercase__ : str = None
lowercase__ : Union[str, Any] = None
lowercase__ : Tuple = None
if self.use_labels:
lowercase__ : Tuple = ids_tensor([self.batch_size], self.type_sequence_label_size )
lowercase__ : Any = ids_tensor([self.batch_size, self.seq_length], self.num_labels )
lowercase__ : int = ids_tensor([self.batch_size], self.num_choices )
lowercase__ : Tuple = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def snake_case__( self: Tuple ):
return OpenLlamaConfig(
vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=lowerCamelCase_, initializer_range=self.initializer_range, use_stable_embedding=lowerCamelCase_, )
def snake_case__( self: Optional[int], lowerCamelCase_: Optional[int], lowerCamelCase_: List[Any], lowerCamelCase_: List[str], lowerCamelCase_: Optional[int], lowerCamelCase_: Dict, lowerCamelCase_: Optional[int], lowerCamelCase_: str ):
lowercase__ : Union[str, Any] = OpenLlamaModel(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
lowercase__ : Union[str, Any] = model(lowerCamelCase_, attention_mask=lowerCamelCase_ )
lowercase__ : List[Any] = model(lowerCamelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) )
def snake_case__( self: str, lowerCamelCase_: Optional[Any], lowerCamelCase_: str, lowerCamelCase_: str, lowerCamelCase_: Optional[Any], lowerCamelCase_: Tuple, lowerCamelCase_: Optional[Any], lowerCamelCase_: List[str], lowerCamelCase_: Dict, lowerCamelCase_: Union[str, Any], ):
lowercase__ : Tuple = True
lowercase__ : int = OpenLlamaModel(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
lowercase__ : List[Any] = model(
lowerCamelCase_, attention_mask=lowerCamelCase_, encoder_hidden_states=lowerCamelCase_, encoder_attention_mask=lowerCamelCase_, )
lowercase__ : Optional[int] = model(
lowerCamelCase_, attention_mask=lowerCamelCase_, encoder_hidden_states=lowerCamelCase_, )
lowercase__ : str = model(lowerCamelCase_, attention_mask=lowerCamelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) )
def snake_case__( self: List[Any], lowerCamelCase_: Optional[Any], lowerCamelCase_: Any, lowerCamelCase_: str, lowerCamelCase_: List[str], lowerCamelCase_: Any, lowerCamelCase_: Dict, lowerCamelCase_: int, lowerCamelCase_: Any, lowerCamelCase_: str, ):
lowercase__ : Optional[Any] = OpenLlamaForCausalLM(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
lowercase__ : List[Any] = model(lowerCamelCase_, attention_mask=lowerCamelCase_, labels=lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size) )
def snake_case__( self: Tuple, lowerCamelCase_: str, lowerCamelCase_: Optional[Any], lowerCamelCase_: Optional[int], lowerCamelCase_: str, lowerCamelCase_: List[Any], lowerCamelCase_: List[Any], lowerCamelCase_: str, lowerCamelCase_: Optional[Any], lowerCamelCase_: Union[str, Any], ):
lowercase__ : Optional[int] = True
lowercase__ : Optional[int] = True
lowercase__ : Tuple = OpenLlamaForCausalLM(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
# first forward pass
lowercase__ : Any = model(
lowerCamelCase_, attention_mask=lowerCamelCase_, encoder_hidden_states=lowerCamelCase_, encoder_attention_mask=lowerCamelCase_, use_cache=lowerCamelCase_, )
lowercase__ : str = outputs.past_key_values
# create hypothetical multiple next token and extent to next_input_ids
lowercase__ : Optional[Any] = ids_tensor((self.batch_size, 3), config.vocab_size )
lowercase__ : str = ids_tensor((self.batch_size, 3), vocab_size=2 )
# append to next input_ids and
lowercase__ : Tuple = torch.cat([input_ids, next_tokens], dim=-1 )
lowercase__ : List[str] = torch.cat([input_mask, next_mask], dim=-1 )
lowercase__ : int = model(
lowerCamelCase_, attention_mask=lowerCamelCase_, encoder_hidden_states=lowerCamelCase_, encoder_attention_mask=lowerCamelCase_, output_hidden_states=lowerCamelCase_, )['hidden_states'][0]
lowercase__ : int = model(
lowerCamelCase_, attention_mask=lowerCamelCase_, encoder_hidden_states=lowerCamelCase_, encoder_attention_mask=lowerCamelCase_, past_key_values=lowerCamelCase_, output_hidden_states=lowerCamelCase_, )['hidden_states'][0]
# select random slice
lowercase__ : Tuple = ids_tensor((1,), output_from_past.shape[-1] ).item()
lowercase__ : Union[str, Any] = output_from_no_past[:, -3:, random_slice_idx].detach()
lowercase__ : Tuple = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(lowerCamelCase_, lowerCamelCase_, atol=1E-3 ) )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class __lowerCAmelCase ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , unittest.TestCase ):
'''simple docstring'''
_A = (
(OpenLlamaModel, OpenLlamaForCausalLM, OpenLlamaForSequenceClassification) if is_torch_available() else ()
)
_A = (OpenLlamaForCausalLM,) if is_torch_available() else ()
_A = (
{
"feature-extraction": OpenLlamaModel,
"text-classification": OpenLlamaForSequenceClassification,
"text-generation": OpenLlamaForCausalLM,
"zero-shot": OpenLlamaForSequenceClassification,
}
if is_torch_available()
else {}
)
_A = False
_A = False
def snake_case__( self: Any ):
lowercase__ : List[Any] = OpenLlamaModelTester(self )
lowercase__ : Dict = ConfigTester(self, config_class=lowerCamelCase_, hidden_size=37 )
def snake_case__( self: Any ):
self.config_tester.run_common_tests()
def snake_case__( self: Dict ):
lowercase__ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCamelCase_ )
def snake_case__( self: str ):
lowercase__ : Any = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
lowercase__ : Dict = type
self.model_tester.create_and_check_model(*lowerCamelCase_ )
def snake_case__( self: Any ):
lowercase__ , lowercase__ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
lowercase__ : Tuple = 3
lowercase__ : Union[str, Any] = input_dict['input_ids']
lowercase__ : Union[str, Any] = input_ids.ne(1 ).to(lowerCamelCase_ )
lowercase__ : int = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size )
lowercase__ : Optional[Any] = OpenLlamaForSequenceClassification(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
lowercase__ : Optional[Any] = model(lowerCamelCase_, attention_mask=lowerCamelCase_, labels=lowerCamelCase_ )
self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels) )
def snake_case__( self: Any ):
lowercase__ , lowercase__ : int = self.model_tester.prepare_config_and_inputs_for_common()
lowercase__ : str = 3
lowercase__ : Optional[Any] = 'single_label_classification'
lowercase__ : Union[str, Any] = input_dict['input_ids']
lowercase__ : str = input_ids.ne(1 ).to(lowerCamelCase_ )
lowercase__ : Any = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size )
lowercase__ : Dict = OpenLlamaForSequenceClassification(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
lowercase__ : Optional[int] = model(lowerCamelCase_, attention_mask=lowerCamelCase_, labels=lowerCamelCase_ )
self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels) )
def snake_case__( self: Union[str, Any] ):
lowercase__ , lowercase__ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
lowercase__ : Dict = 3
lowercase__ : List[Any] = 'multi_label_classification'
lowercase__ : Any = input_dict['input_ids']
lowercase__ : int = input_ids.ne(1 ).to(lowerCamelCase_ )
lowercase__ : Optional[int] = ids_tensor(
[self.model_tester.batch_size, config.num_labels], self.model_tester.type_sequence_label_size ).to(torch.float )
lowercase__ : Optional[int] = OpenLlamaForSequenceClassification(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
lowercase__ : List[Any] = model(lowerCamelCase_, attention_mask=lowerCamelCase_, labels=lowerCamelCase_ )
self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels) )
@unittest.skip('Open-Llama buffers include complex numbers, which breaks this test' )
def snake_case__( self: List[str] ):
pass
@parameterized.expand([('linear',), ('dynamic',)] )
def snake_case__( self: Any, lowerCamelCase_: List[Any] ):
lowercase__ , lowercase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
lowercase__ : Optional[int] = ids_tensor([1, 10], config.vocab_size )
lowercase__ : Optional[Any] = ids_tensor([1, int(config.max_position_embeddings * 1.5 )], config.vocab_size )
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
lowercase__ : str = OpenLlamaModel(lowerCamelCase_ )
original_model.to(lowerCamelCase_ )
original_model.eval()
lowercase__ : Union[str, Any] = original_model(lowerCamelCase_ ).last_hidden_state
lowercase__ : List[str] = original_model(lowerCamelCase_ ).last_hidden_state
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
lowercase__ : List[str] = {'type': scaling_type, 'factor': 1_0.0}
lowercase__ : Union[str, Any] = OpenLlamaModel(lowerCamelCase_ )
scaled_model.to(lowerCamelCase_ )
scaled_model.eval()
lowercase__ : Dict = scaled_model(lowerCamelCase_ ).last_hidden_state
lowercase__ : Union[str, Any] = scaled_model(lowerCamelCase_ ).last_hidden_state
# Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
# maximum sequence length, so the outputs for the short input should match.
if scaling_type == "dynamic":
self.assertTrue(torch.allclose(lowerCamelCase_, lowerCamelCase_, atol=1E-5 ) )
else:
self.assertFalse(torch.allclose(lowerCamelCase_, lowerCamelCase_, atol=1E-5 ) )
# The output should be different for long inputs
self.assertFalse(torch.allclose(lowerCamelCase_, lowerCamelCase_, atol=1E-5 ) )
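        # The rope_scaling configs exercised here take the form (illustrative):
        #   config.rope_scaling = {"type": "linear", "factor": 10.0}
        # Linear scaling perturbs even short-input outputs, while dynamic scaling
        # only departs from the original RoPE past max_position_embeddings.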
"""simple docstring"""
import sys
from typing import Tuple
import numpy as np
import torch
from PIL import Image
from torch import nn
from transformers.image_utils import PILImageResampling
from utils import img_tensorize
class ResizeShortestEdge:
    def __init__(self, short_edge_length, max_size=sys.maxsize):
        # Resize so the shorter edge matches a length sampled from
        # `short_edge_length`, capping the longer edge at `max_size`.
        self.interp_method = "bilinear"
        self.max_size = max_size
        self.short_edge_length = short_edge_length
    def __call__(self, imgs):
        img_augs = []
        for img in imgs:
            h, w = img.shape[:2]
            # later: provide list and randomly choose index for resize
            size = np.random.randint(self.short_edge_length[0], self.short_edge_length[1] + 1)
            if size == 0:
                return img
            scale = size * 1.0 / min(h, w)
            if h < w:
                newh, neww = size, scale * w
            else:
                newh, neww = scale * h, size
            if max(newh, neww) > self.max_size:
                scale = self.max_size * 1.0 / max(newh, neww)
                newh = newh * scale
                neww = neww * scale
            neww = int(neww + 0.5)
            newh = int(newh + 0.5)

            if img.dtype == np.uint8:
                pil_image = Image.fromarray(img)
                pil_image = pil_image.resize((neww, newh), PILImageResampling.BILINEAR)
                img = np.asarray(pil_image)
            else:
                img = img.permute(2, 0, 1).unsqueeze(0)  # hw(c) -> nchw
                img = nn.functional.interpolate(
                    img, (newh, neww), mode=self.interp_method, align_corners=False
                ).squeeze(0)
            img_augs.append(img)

        return img_augs
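    # Example of the scaling above (illustrative numbers): for an 800x1333 image
    # with size sampled as 600, scale = 600/800 = 0.75, so (newh, neww) =
    # (600, 999.75), rounded to (600, 1000); only if the longer edge exceeded
    # self.max_size would the extra shrink factor apply.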
class Preprocess:
    def __init__(self, cfg):
        self.aug = ResizeShortestEdge([cfg.INPUT.MIN_SIZE_TEST, cfg.INPUT.MIN_SIZE_TEST], cfg.INPUT.MAX_SIZE_TEST)
        self.input_format = cfg.INPUT.FORMAT
        self.size_divisibility = cfg.SIZE_DIVISIBILITY
        self.pad_value = cfg.PAD_VALUE
        self.max_image_size = cfg.INPUT.MAX_SIZE_TEST
        self.device = cfg.MODEL.DEVICE
        self.pixel_std = torch.tensor(cfg.MODEL.PIXEL_STD).to(self.device).view(len(cfg.MODEL.PIXEL_STD), 1, 1)
        self.pixel_mean = torch.tensor(cfg.MODEL.PIXEL_MEAN).to(self.device).view(len(cfg.MODEL.PIXEL_STD), 1, 1)
        self.normalizer = lambda x: (x - self.pixel_mean) / self.pixel_std
    def pad(self, images):
        max_size = tuple(max(s) for s in zip(*[img.shape for img in images]))
        image_sizes = [im.shape[-2:] for im in images]
        images = [
            nn.functional.pad(
                im,
                [0, max_size[-1] - size[1], 0, max_size[-2] - size[0]],
                value=self.pad_value,
            )
            for size, im in zip(image_sizes, images)
        ]

        return torch.stack(images), torch.tensor(image_sizes)
    def __call__(self, images, single_image=False):
        with torch.no_grad():
            if not isinstance(images, list):
                images = [images]
            if single_image:
                assert len(images) == 1
            for i in range(len(images)):
                if isinstance(images[i], torch.Tensor):
                    images.insert(i, images.pop(i).to(self.device).float())
                elif not isinstance(images[i], torch.Tensor):
                    images.insert(
                        i,
                        torch.as_tensor(img_tensorize(images.pop(i), input_format=self.input_format))
                        .to(self.device)
                        .float(),
                    )
            # resize smallest edge
            raw_sizes = torch.tensor([im.shape[:2] for im in images])
            images = self.aug(images)
            # transpose images and convert to torch tensors
            # images = [torch.as_tensor(i.astype("float32")).permute(2, 0, 1).to(self.device) for i in images]
            # now normalize before pad to avoid useless arithmetic
            images = [self.normalizer(x) for x in images]
            # now pad them to do the following operations
            images, sizes = self.pad(images)
            # Normalize
            if self.size_divisibility > 0:
                raise NotImplementedError()
            # pad
            scales_yx = torch.true_divide(raw_sizes, sizes)
            if single_image:
                return images[0], sizes[0], scales_yx[0]
            else:
                return images, sizes, scales_yx
def _scale_box(boxes, scale_yx):
    boxes[:, 0::2] *= scale_yx[:, 1]
    boxes[:, 1::2] *= scale_yx[:, 0]
    return boxes


def _clip_box(tensor, box_size):
    assert torch.isfinite(tensor).all(), "Box tensor contains infinite or NaN!"
    h, w = box_size
    tensor[:, 0].clamp_(min=0, max=w)
    tensor[:, 1].clamp_(min=0, max=h)
    tensor[:, 2].clamp_(min=0, max=w)
    tensor[:, 3].clamp_(min=0, max=h)
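# Boxes are (x0, y0, x1, y1): _scale_box multiplies x coordinates by the x scale
# and y coordinates by the y scale. E.g. a box [10, 20, 30, 40] with scale_yx
# row (0.5, 2.0) becomes [20, 10, 60, 20].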
"""simple docstring"""
from __future__ import annotations
def median_of_two_arrays(nums1: list[float], nums2: list[float]) -> float:
    all_numbers = sorted(nums1 + nums2)
    div, mod = divmod(len(all_numbers), 2)
    if mod == 1:
        return all_numbers[div]
    else:
        return (all_numbers[div] + all_numbers[div - 1]) / 2


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    array_1 = [float(x) for x in input("Enter the elements of first array: ").split()]
    array_2 = [float(x) for x in input("Enter the elements of second array: ").split()]
    print(f"The median of two arrays is: {median_of_two_arrays(array_1, array_2)}")
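    # Worked examples: median_of_two_arrays([1.0, 3.0], [2.0]) == 2.0 (odd count,
    # middle element) and median_of_two_arrays([1.0, 2.0], [3.0, 4.0]) == 2.5
    # (even count, mean of the two middle elements).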
# This is the module that test_patching.py uses to test patch_submodule()
import os # noqa: this is just for tests
import os as renamed_os # noqa: this is just for tests
from os import path # noqa: this is just for tests
from os import path as renamed_path # noqa: this is just for tests
from os.path import join # noqa: this is just for tests
from os.path import join as renamed_join # noqa: this is just for tests
lowerCamelCase__ : Optional[Any] = open # noqa: we just need to have a builtin inside this module to test it properly
import warnings
from ...utils import logging
from .image_processing_perceiver import PerceiverImageProcessor
lowercase : List[str] = logging.get_logger(__name__)
class PerceiverFeatureExtractor(PerceiverImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class PerceiverFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use PerceiverImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
'''simple docstring'''
import argparse
import os.path as osp
import re
import torch
from safetensors.torch import load_file, save_file
# =================#
# UNet Conversion #
# =================#
unet_conversion_map = [
# (stable-diffusion, HF Diffusers)
("time_embed.0.weight", "time_embedding.linear_1.weight"),
("time_embed.0.bias", "time_embedding.linear_1.bias"),
("time_embed.2.weight", "time_embedding.linear_2.weight"),
("time_embed.2.bias", "time_embedding.linear_2.bias"),
("input_blocks.0.0.weight", "conv_in.weight"),
("input_blocks.0.0.bias", "conv_in.bias"),
("out.0.weight", "conv_norm_out.weight"),
("out.0.bias", "conv_norm_out.bias"),
("out.2.weight", "conv_out.weight"),
("out.2.bias", "conv_out.bias"),
]
unet_conversion_map_resnet = [
# (stable-diffusion, HF Diffusers)
("in_layers.0", "norm1"),
("in_layers.2", "conv1"),
("out_layers.0", "norm2"),
("out_layers.3", "conv2"),
("emb_layers.1", "time_emb_proj"),
("skip_connection", "conv_shortcut"),
]
unet_conversion_map_layer = []
# hardcoded number of downblocks and resnets/attentions...
# would need smarter logic for other networks.
for i in range(4):
    # loop over downblocks/upblocks

    for j in range(2):
        # loop over resnets/attentions for downblocks
        hf_down_res_prefix = f"down_blocks.{i}.resnets.{j}."
        sd_down_res_prefix = f"input_blocks.{3*i + j + 1}.0."
        unet_conversion_map_layer.append((sd_down_res_prefix, hf_down_res_prefix))

        if i < 3:
            # no attention layers in down_blocks.3
            hf_down_atn_prefix = f"down_blocks.{i}.attentions.{j}."
            sd_down_atn_prefix = f"input_blocks.{3*i + j + 1}.1."
            unet_conversion_map_layer.append((sd_down_atn_prefix, hf_down_atn_prefix))

    for j in range(3):
        # loop over resnets/attentions for upblocks
        hf_up_res_prefix = f"up_blocks.{i}.resnets.{j}."
        sd_up_res_prefix = f"output_blocks.{3*i + j}.0."
        unet_conversion_map_layer.append((sd_up_res_prefix, hf_up_res_prefix))

        if i > 0:
            # no attention layers in up_blocks.0
            hf_up_atn_prefix = f"up_blocks.{i}.attentions.{j}."
            sd_up_atn_prefix = f"output_blocks.{3*i + j}.1."
            unet_conversion_map_layer.append((sd_up_atn_prefix, hf_up_atn_prefix))

    if i < 3:
        # no downsample in down_blocks.3
        hf_downsample_prefix = f"down_blocks.{i}.downsamplers.0.conv."
        sd_downsample_prefix = f"input_blocks.{3*(i+1)}.0.op."
        unet_conversion_map_layer.append((sd_downsample_prefix, hf_downsample_prefix))

        # no upsample in up_blocks.3
        hf_upsample_prefix = f"up_blocks.{i}.upsamplers.0."
        sd_upsample_prefix = f"output_blocks.{3*i + 2}.{1 if i == 0 else 2}."
        unet_conversion_map_layer.append((sd_upsample_prefix, hf_upsample_prefix))

hf_mid_atn_prefix = "mid_block.attentions.0."
sd_mid_atn_prefix = "middle_block.1."
unet_conversion_map_layer.append((sd_mid_atn_prefix, hf_mid_atn_prefix))

for j in range(2):
    hf_mid_res_prefix = f"mid_block.resnets.{j}."
    sd_mid_res_prefix = f"middle_block.{2*j}."
    unet_conversion_map_layer.append((sd_mid_res_prefix, hf_mid_res_prefix))
def convert_unet_state_dict(unet_state_dict):
    mapping = {k: k for k in unet_state_dict.keys()}
    for sd_name, hf_name in unet_conversion_map:
        mapping[hf_name] = sd_name
    for k, v in mapping.items():
        if "resnets" in k:
            for sd_part, hf_part in unet_conversion_map_resnet:
                v = v.replace(hf_part, sd_part)
            mapping[k] = v
    for k, v in mapping.items():
        for sd_part, hf_part in unet_conversion_map_layer:
            v = v.replace(hf_part, sd_part)
        mapping[k] = v
    new_state_dict = {v: unet_state_dict[k] for k, v in mapping.items()}
    return new_state_dict
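# Example of the renaming this produces (illustrative key): the HF parameter
# "down_blocks.0.resnets.1.conv1.weight" maps to the SD key
# "input_blocks.2.0.in_layers.2.weight" (layer map: resnets.1 of down block 0
# -> input_blocks.2.0; resnet map: conv1 -> in_layers.2).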
# ================#
# VAE Conversion #
# ================#
vae_conversion_map = [
# (stable-diffusion, HF Diffusers)
("nin_shortcut", "conv_shortcut"),
("norm_out", "conv_norm_out"),
("mid.attn_1.", "mid_block.attentions.0."),
]
for i in range(4):
    # down_blocks have two resnets
    for j in range(2):
        hf_down_prefix = f"encoder.down_blocks.{i}.resnets.{j}."
        sd_down_prefix = f"encoder.down.{i}.block.{j}."
        vae_conversion_map.append((sd_down_prefix, hf_down_prefix))

    if i < 3:
        hf_downsample_prefix = f"down_blocks.{i}.downsamplers.0."
        sd_downsample_prefix = f"down.{i}.downsample."
        vae_conversion_map.append((sd_downsample_prefix, hf_downsample_prefix))

        hf_upsample_prefix = f"up_blocks.{i}.upsamplers.0."
        sd_upsample_prefix = f"up.{3-i}.upsample."
        vae_conversion_map.append((sd_upsample_prefix, hf_upsample_prefix))

    # up_blocks have three resnets
    # also, up blocks in hf are numbered in reverse from sd
    for j in range(3):
        hf_up_prefix = f"decoder.up_blocks.{i}.resnets.{j}."
        sd_up_prefix = f"decoder.up.{3-i}.block.{j}."
        vae_conversion_map.append((sd_up_prefix, hf_up_prefix))

# this part accounts for mid blocks in both the encoder and the decoder
for i in range(2):
    hf_mid_res_prefix = f"mid_block.resnets.{i}."
    sd_mid_res_prefix = f"mid.block_{i+1}."
    vae_conversion_map.append((sd_mid_res_prefix, hf_mid_res_prefix))
vae_conversion_map_attn = [
    # (stable-diffusion, HF Diffusers)
    ("norm.", "group_norm."),
    ("q.", "query."),
    ("k.", "key."),
    ("v.", "value."),
    ("proj_out.", "proj_attn."),
]
def snake_case ( a_ : Optional[int] ) -> str:
"""simple docstring"""
return w.reshape(*w.shape , 1 , 1 )
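
# Illustrative check (not in the original): a 2-D linear weight gains trailing
# 1x1 conv dims. Relies on `torch` already being imported at the top of this
# script, as the main block below does.
assert reshape_weight_for_sd(torch.zeros(4, 8)).shape == (4, 8, 1, 1)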
def snake_case ( a_ : int ) -> Union[str, Any]:
"""simple docstring"""
UpperCamelCase_ : Any = {k: k for k in vae_state_dict.keys()}
for k, v in mapping.items():
for sd_part, hf_part in vae_conversion_map:
UpperCamelCase_ : str = v.replace(a_ , a_ )
UpperCamelCase_ : List[Any] = v
for k, v in mapping.items():
if "attentions" in k:
for sd_part, hf_part in vae_conversion_map_attn:
UpperCamelCase_ : Optional[Any] = v.replace(a_ , a_ )
UpperCamelCase_ : List[str] = v
UpperCamelCase_ : Optional[Any] = {v: vae_state_dict[k] for k, v in mapping.items()}
UpperCamelCase_ : List[str] = ["""q""", """k""", """v""", """proj_out"""]
for k, v in new_state_dict.items():
for weight_name in weights_to_convert:
if f"mid.attn_1.{weight_name}.weight" in k:
print(f"Reshaping {k} for SD format" )
UpperCamelCase_ : Tuple = reshape_weight_for_sd(a_ )
return new_state_dict
# =========================#
# Text Encoder Conversion #
# =========================#
textenc_conversion_lst = [
    # (stable-diffusion, HF Diffusers)
    ("resblocks.", "text_model.encoder.layers."),
    ("ln_1", "layer_norm1"),
    ("ln_2", "layer_norm2"),
    (".c_fc.", ".fc1."),
    (".c_proj.", ".fc2."),
    (".attn", ".self_attn"),
    ("ln_final.", "transformer.text_model.final_layer_norm."),
    ("token_embedding.weight", "transformer.text_model.embeddings.token_embedding.weight"),
    ("positional_embedding", "transformer.text_model.embeddings.position_embedding.weight"),
]
protected = {re.escape(x[1]): x[0] for x in textenc_conversion_lst}
textenc_pattern = re.compile("|".join(protected.keys()))

# Ordering is from https://github.com/pytorch/pytorch/blob/master/test/cpp/api/modules.cpp
code2idx = {"q": 0, "k": 1, "v": 2}
def snake_case ( a_ : List[str] ) -> Optional[Any]:
"""simple docstring"""
UpperCamelCase_ : Optional[Any] = {}
UpperCamelCase_ : Optional[int] = {}
UpperCamelCase_ : List[str] = {}
for k, v in text_enc_dict.items():
if (
k.endswith(""".self_attn.q_proj.weight""" )
or k.endswith(""".self_attn.k_proj.weight""" )
or k.endswith(""".self_attn.v_proj.weight""" )
):
UpperCamelCase_ : Optional[int] = k[: -len(""".q_proj.weight""" )]
UpperCamelCase_ : Dict = k[-len("""q_proj.weight""" )]
if k_pre not in capture_qkv_weight:
UpperCamelCase_ : Union[str, Any] = [None, None, None]
UpperCamelCase_ : str = v
continue
if (
k.endswith(""".self_attn.q_proj.bias""" )
or k.endswith(""".self_attn.k_proj.bias""" )
or k.endswith(""".self_attn.v_proj.bias""" )
):
UpperCamelCase_ : List[Any] = k[: -len(""".q_proj.bias""" )]
UpperCamelCase_ : Any = k[-len("""q_proj.bias""" )]
if k_pre not in capture_qkv_bias:
UpperCamelCase_ : str = [None, None, None]
UpperCamelCase_ : Dict = v
continue
UpperCamelCase_ : Optional[Any] = textenc_pattern.sub(lambda a_ : protected[re.escape(m.group(0 ) )] , a_ )
UpperCamelCase_ : Optional[Any] = v
for k_pre, tensors in capture_qkv_weight.items():
if None in tensors:
raise Exception("""CORRUPTED MODEL: one of the q-k-v values for the text encoder was missing""" )
UpperCamelCase_ : Dict = textenc_pattern.sub(lambda a_ : protected[re.escape(m.group(0 ) )] , a_ )
UpperCamelCase_ : Dict = torch.cat(a_ )
for k_pre, tensors in capture_qkv_bias.items():
if None in tensors:
raise Exception("""CORRUPTED MODEL: one of the q-k-v values for the text encoder was missing""" )
UpperCamelCase_ : Dict = textenc_pattern.sub(lambda a_ : protected[re.escape(m.group(0 ) )] , a_ )
UpperCamelCase_ : Tuple = torch.cat(a_ )
return new_state_dict
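
# Sketch of the q/k/v fusion performed above (illustrative, not in the
# original): three (d, d) projection matrices are concatenated row-wise into a
# single (3d, d) in_proj matrix.
assert torch.cat([torch.zeros(2, 2), torch.zeros(2, 2), torch.zeros(2, 2)]).shape == (6, 2)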
def snake_case ( a_ : List[Any] ) -> int:
"""simple docstring"""
return text_enc_dict
if __name__ == "__main__":
UpperCamelCase =argparse.ArgumentParser()
parser.add_argument("--model_path", default=None, type=str, required=True, help="Path to the model to convert.")
parser.add_argument("--checkpoint_path", default=None, type=str, required=True, help="Path to the output model.")
parser.add_argument("--half", action="store_true", help="Save weights in half precision.")
parser.add_argument(
"--use_safetensors", action="store_true", help="Save weights use safetensors, default is ckpt."
)
UpperCamelCase =parser.parse_args()
assert args.model_path is not None, "Must provide a model path!"
assert args.checkpoint_path is not None, "Must provide a checkpoint path!"
# Path for safetensors
UpperCamelCase =osp.join(args.model_path, "unet", "diffusion_pytorch_model.safetensors")
UpperCamelCase =osp.join(args.model_path, "vae", "diffusion_pytorch_model.safetensors")
UpperCamelCase =osp.join(args.model_path, "text_encoder", "model.safetensors")
# Load models from safetensors if it exists, if it doesn't pytorch
if osp.exists(unet_path):
UpperCamelCase =load_file(unet_path, device="cpu")
else:
UpperCamelCase =osp.join(args.model_path, "unet", "diffusion_pytorch_model.bin")
UpperCamelCase =torch.load(unet_path, map_location="cpu")
if osp.exists(vae_path):
UpperCamelCase =load_file(vae_path, device="cpu")
else:
UpperCamelCase =osp.join(args.model_path, "vae", "diffusion_pytorch_model.bin")
UpperCamelCase =torch.load(vae_path, map_location="cpu")
if osp.exists(text_enc_path):
UpperCamelCase =load_file(text_enc_path, device="cpu")
else:
UpperCamelCase =osp.join(args.model_path, "text_encoder", "pytorch_model.bin")
UpperCamelCase =torch.load(text_enc_path, map_location="cpu")
# Convert the UNet model
UpperCamelCase =convert_unet_state_dict(unet_state_dict)
UpperCamelCase ={"model.diffusion_model." + k: v for k, v in unet_state_dict.items()}
# Convert the VAE model
UpperCamelCase =convert_vae_state_dict(vae_state_dict)
UpperCamelCase ={"first_stage_model." + k: v for k, v in vae_state_dict.items()}
# Easiest way to identify v2.0 model seems to be that the text encoder (OpenCLIP) is deeper
UpperCamelCase ="text_model.encoder.layers.22.layer_norm2.bias" in text_enc_dict
if is_vaa_model:
# Need to add the tag 'transformer' in advance so we can knock it out from the final layer-norm
UpperCamelCase ={"transformer." + k: v for k, v in text_enc_dict.items()}
UpperCamelCase =convert_text_enc_state_dict_vaa(text_enc_dict)
UpperCamelCase ={"cond_stage_model.model." + k: v for k, v in text_enc_dict.items()}
else:
UpperCamelCase =convert_text_enc_state_dict(text_enc_dict)
UpperCamelCase ={"cond_stage_model.transformer." + k: v for k, v in text_enc_dict.items()}
# Put together new checkpoint
UpperCamelCase ={**unet_state_dict, **vae_state_dict, **text_enc_dict}
if args.half:
UpperCamelCase ={k: v.half() for k, v in state_dict.items()}
if args.use_safetensors:
save_file(state_dict, args.checkpoint_path)
else:
UpperCamelCase ={"state_dict": state_dict}
torch.save(state_dict, args.checkpoint_path)
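
# Example invocation (illustrative; the script and file names are placeholders):
#   python convert_diffusers_to_original_stable_diffusion.py \
#       --model_path ./my-diffusers-model --checkpoint_path ./model.safetensors --use_safetensors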
| 702
|
import random
def snake_case ( a_ : int , a_ : float , a_ : bool = False ) -> dict:
"""simple docstring"""
UpperCamelCase_ : dict = {i: [] for i in range(a_ )}
# if probability is greater or equal than 1, then generate a complete graph
if probability >= 1:
return complete_graph(a_ )
# if probability is lower or equal than 0, then return a graph without edges
if probability <= 0:
return graph
# for each couple of nodes, add an edge from u to v
# if the number randomly generated is greater than probability probability
for i in range(a_ ):
for j in range(i + 1 , a_ ):
if random.random() < probability:
graph[i].append(a_ )
if not directed:
# if the graph is undirected, add an edge in from j to i, either
graph[j].append(a_ )
return graph
def snake_case ( a_ : int ) -> dict:
"""simple docstring"""
return {
i: [j for j in range(a_ ) if i != j] for i in range(a_ )
}
if __name__ == "__main__":
import doctest
doctest.testmod()
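    # Illustrative check (not in the original): a complete graph on 3 vertices
    # connects every pair of distinct nodes.
    assert complete_graph(3) == {0: [1, 2], 1: [0, 2], 2: [0, 1]}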
| 543
| 0
|
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
    import torch

if is_vision_available():
    from PIL import Image

    from transformers import GLPNImageProcessor
class GLPNImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size_divisor=32,
        do_rescale=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size_divisor = size_divisor
        self.do_rescale = do_rescale

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size_divisor": self.size_divisor,
            "do_rescale": self.do_rescale,
        }
@require_torch
@require_vision
class GLPNImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = GLPNImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = GLPNImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size_divisor"))
        self.assertTrue(hasattr(image_processing, "resample"))
        self.assertTrue(hasattr(image_processing, "do_rescale"))

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input (GLPNImageProcessor doesn't support batching)
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0)
        self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0)

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input (GLPNImageProcessor doesn't support batching)
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0)
        self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0)

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input (GLPNImageProcessor doesn't support batching)
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0)
        self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0)
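
# Illustrative sketch (not part of the original tests; assumes GLPN rounds each
# spatial dimension *down* to the nearest multiple of `size_divisor`, which is
# what the modulo assertions above rely on):
def _expected_glpn_dim(dim: int, size_divisor: int = 32) -> int:
    return (dim // size_divisor) * size_divisor


assert _expected_glpn_dim(33) == 32 and _expected_glpn_dim(400) == 384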
| 34
|
import os
import tempfile
import unittest
from transformers import FlaubertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
    import torch

    from transformers import (
        FlaubertForMultipleChoice,
        FlaubertForQuestionAnswering,
        FlaubertForQuestionAnsweringSimple,
        FlaubertForSequenceClassification,
        FlaubertForTokenClassification,
        FlaubertModel,
        FlaubertWithLMHeadModel,
    )
    from transformers.models.flaubert.modeling_flaubert import FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class FlaubertModelTester(object):
    def __init__(
        self, parent, batch_size=13, seq_length=7, is_training=True, use_input_lengths=True,
        use_token_type_ids=True, use_labels=True, gelu_activation=True, sinusoidal_embeddings=False,
        causal=False, asm=False, n_langs=2, vocab_size=99, n_special=0, hidden_size=32,
        num_hidden_layers=5, num_attention_heads=4, hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=12,
        type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4,
        summary_type="last", use_proj=None, scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_lengths = use_input_lengths
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.gelu_activation = gelu_activation
        self.sinusoidal_embeddings = sinusoidal_embeddings
        self.causal = causal
        self.asm = asm
        self.n_langs = n_langs
        self.vocab_size = vocab_size
        self.n_special = n_special
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.summary_type = summary_type
        self.use_proj = use_proj
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = random_attention_mask([self.batch_size, self.seq_length])

        input_lengths = None
        if self.use_input_lengths:
            input_lengths = (
                ids_tensor([self.batch_size], vocab_size=2) + self.seq_length - 2
            )  # small variation of seq_length

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.n_langs)

        sequence_labels = None
        token_labels = None
        is_impossible_labels = None
        choice_labels = None  # initialized here too, so it is defined even when use_labels is False
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            is_impossible_labels = ids_tensor([self.batch_size], 2).float()
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        )

    def get_config(self):
        return FlaubertConfig(
            vocab_size=self.vocab_size, n_special=self.n_special, emb_dim=self.hidden_size,
            n_layers=self.num_hidden_layers, n_heads=self.num_attention_heads,
            dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob,
            gelu_activation=self.gelu_activation, sinusoidal_embeddings=self.sinusoidal_embeddings,
            asm=self.asm, causal=self.causal, n_langs=self.n_langs,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range, summary_type=self.summary_type,
            use_proj=self.use_proj,
        )
    def create_and_check_flaubert_model(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels,
        is_impossible_labels, choice_labels, input_mask,
    ):
        model = FlaubertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, lengths=input_lengths, langs=token_type_ids)
        result = model(input_ids, langs=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_flaubert_lm_head(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels,
        is_impossible_labels, choice_labels, input_mask,
    ):
        model = FlaubertWithLMHeadModel(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_flaubert_simple_qa(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels,
        is_impossible_labels, choice_labels, input_mask,
    ):
        model = FlaubertForQuestionAnsweringSimple(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids)
        result = model(input_ids, start_positions=sequence_labels, end_positions=sequence_labels)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def create_and_check_flaubert_qa(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels,
        is_impossible_labels, choice_labels, input_mask,
    ):
        model = FlaubertForQuestionAnswering(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids)
        result_with_labels = model(
            input_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
            cls_index=sequence_labels,
            is_impossible=is_impossible_labels,
            p_mask=input_mask,
        )
        result_with_labels = model(
            input_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
            cls_index=sequence_labels,
            is_impossible=is_impossible_labels,
        )
        (total_loss,) = result_with_labels.to_tuple()
        result_with_labels = model(input_ids, start_positions=sequence_labels, end_positions=sequence_labels)
        (total_loss,) = result_with_labels.to_tuple()
        self.parent.assertEqual(result_with_labels.loss.shape, ())
        self.parent.assertEqual(result.start_top_log_probs.shape, (self.batch_size, model.config.start_n_top))
        self.parent.assertEqual(result.start_top_index.shape, (self.batch_size, model.config.start_n_top))
        self.parent.assertEqual(
            result.end_top_log_probs.shape, (self.batch_size, model.config.start_n_top * model.config.end_n_top)
        )
        self.parent.assertEqual(
            result.end_top_index.shape, (self.batch_size, model.config.start_n_top * model.config.end_n_top)
        )
        self.parent.assertEqual(result.cls_logits.shape, (self.batch_size,))
    def create_and_check_flaubert_sequence_classif(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels,
        is_impossible_labels, choice_labels, input_mask,
    ):
        model = FlaubertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids)
        result = model(input_ids, labels=sequence_labels)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))
    def create_and_check_flaubert_token_classif(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels,
        is_impossible_labels, choice_labels, input_mask,
    ):
        config.num_labels = self.num_labels
        model = FlaubertForTokenClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def create_and_check_flaubert_multiple_choice(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels,
        is_impossible_labels, choice_labels, input_mask,
    ):
        config.num_choices = self.num_choices
        model = FlaubertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
            "lengths": input_lengths,
            "attention_mask": input_mask,
        }
        return config, inputs_dict
@require_torch
class FlaubertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            FlaubertModel,
            FlaubertWithLMHeadModel,
            FlaubertForQuestionAnswering,
            FlaubertForQuestionAnsweringSimple,
            FlaubertForSequenceClassification,
            FlaubertForTokenClassification,
            FlaubertForMultipleChoice,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": FlaubertModel,
            "fill-mask": FlaubertWithLMHeadModel,
            "question-answering": FlaubertForQuestionAnsweringSimple,
            "text-classification": FlaubertForSequenceClassification,
            "token-classification": FlaubertForTokenClassification,
            "zero-shot": FlaubertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if (
            pipeline_test_casse_name == "QAPipelineTests"
            and tokenizer_name is not None
            and not tokenizer_name.endswith("Fast")
        ):
            # `QAPipelineTests` fails for a few models when the slower tokenizer are used.
            # (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
            # TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
            return True

        return False
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class.__name__ == "FlaubertForQuestionAnswering":
                inputs_dict["start_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
                inputs_dict["end_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )

        return inputs_dict

    def setUp(self):
        self.model_tester = FlaubertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=FlaubertConfig, emb_dim=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_flaubert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_model(*config_and_inputs)

    def test_flaubert_lm_head(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_lm_head(*config_and_inputs)

    def test_flaubert_simple_qa(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_simple_qa(*config_and_inputs)

    def test_flaubert_qa(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_qa(*config_and_inputs)

    def test_flaubert_sequence_classif(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_sequence_classif(*config_and_inputs)

    def test_flaubert_token_classif(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_token_classif(*config_and_inputs)

    def test_flaubert_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_multiple_choice(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = FlaubertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
    @slow
    @require_torch_gpu
    def test_torchscript_device_change(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            # FlauBertForMultipleChoice behaves incorrectly in JIT environments.
            if model_class == FlaubertForMultipleChoice:
                return

            config.torchscript = True
            model = model_class(config=config)

            inputs_dict = self._prepare_for_class(inputs_dict, model_class)
            traced_model = torch.jit.trace(
                model, (inputs_dict["input_ids"].to("cpu"), inputs_dict["attention_mask"].to("cpu"))
            )

            with tempfile.TemporaryDirectory() as tmp:
                torch.jit.save(traced_model, os.path.join(tmp, "traced_model.pt"))
                loaded = torch.jit.load(os.path.join(tmp, "traced_model.pt"), map_location=torch_device)
                loaded(inputs_dict["input_ids"].to(torch_device), inputs_dict["attention_mask"].to(torch_device))
@require_torch
class FlaubertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = FlaubertModel.from_pretrained("flaubert/flaubert_base_cased")
        input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size((1, 11, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-2.6251, -1.4298, -0.0227], [-2.8510, -1.6387, 0.2258], [-2.8114, -1.1832, -0.3066]]]
        )

        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
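
# Minimal usage sketch mirroring the integration test above (illustrative, not
# part of the original file):
#   model = FlaubertModel.from_pretrained("flaubert/flaubert_base_cased")
#   hidden = model(torch.tensor([[0, 345, 232, 2]]))[0]  # shape (batch, seq_len, 768)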
| 428
| 0
|
import tempfile
import unittest
import numpy as np
from diffusers import (
DDIMScheduler,
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionPipeline,
PNDMScheduler,
)
from diffusers.utils.testing_utils import is_onnx_available, nightly, require_onnxruntime, require_torch_gpu
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
    import onnxruntime as ort
class OnnxStableDiffusionPipelineFastTests(OnnxPipelineTesterMixin, unittest.TestCase):
    hub_checkpoint = "hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline"

    def get_dummy_inputs(self, seed=0):
        generator = np.random.RandomState(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs
    def test_pipeline_default_ddim(self):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.65072, 0.58492, 0.48219, 0.55521, 0.53180, 0.55939, 0.50697, 0.39800, 0.46455])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_pipeline_pndm(self):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = PNDMScheduler.from_config(pipe.scheduler.config, skip_prk_steps=True)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.65863, 0.59425, 0.49326, 0.56313, 0.53875, 0.56627, 0.51065, 0.39777, 0.46330])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_pipeline_lms(self):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.53755, 0.60786, 0.47402, 0.49488, 0.51869, 0.49819, 0.47985, 0.38957, 0.44279])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_pipeline_euler(self):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.53755, 0.60786, 0.47402, 0.49488, 0.51869, 0.49819, 0.47985, 0.38957, 0.44279])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_pipeline_euler_ancestral(self):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.53817, 0.60812, 0.47384, 0.49530, 0.51894, 0.49814, 0.47984, 0.38958, 0.44271])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_pipeline_dpm_multistep(self):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.53895, 0.60808, 0.47933, 0.49608, 0.51886, 0.49950, 0.48053, 0.38957, 0.44200])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    def test_stable_diffusion_prompt_embeds(self):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        inputs["prompt"] = 3 * [inputs["prompt"]]

        # forward
        output = pipe(**inputs)
        image_slice_1 = output.images[0, -3:, -3:, -1]

        inputs = self.get_dummy_inputs()
        prompt = 3 * [inputs.pop("prompt")]

        text_inputs = pipe.tokenizer(
            prompt,
            padding="max_length",
            max_length=pipe.tokenizer.model_max_length,
            truncation=True,
            return_tensors="np",
        )
        text_inputs = text_inputs["input_ids"]

        inputs["prompt_embeds"] = pipe.text_encoder(input_ids=text_inputs.astype(np.int32))[0]

        # forward
        output = pipe(**inputs)
        image_slice_2 = output.images[0, -3:, -3:, -1]

        assert np.abs(image_slice_1.flatten() - image_slice_2.flatten()).max() < 1e-4
    def test_stable_diffusion_negative_prompt_embeds(self):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        negative_prompt = 3 * ["this is a negative prompt"]
        inputs["negative_prompt"] = negative_prompt
        inputs["prompt"] = 3 * [inputs["prompt"]]

        # forward
        output = pipe(**inputs)
        image_slice_1 = output.images[0, -3:, -3:, -1]

        inputs = self.get_dummy_inputs()
        prompt = 3 * [inputs.pop("prompt")]

        embeds = []
        for p in [prompt, negative_prompt]:
            text_inputs = pipe.tokenizer(
                p,
                padding="max_length",
                max_length=pipe.tokenizer.model_max_length,
                truncation=True,
                return_tensors="np",
            )
            text_inputs = text_inputs["input_ids"]
            embeds.append(pipe.text_encoder(input_ids=text_inputs.astype(np.int32))[0])

        inputs["prompt_embeds"], inputs["negative_prompt_embeds"] = embeds

        # forward
        output = pipe(**inputs)
        image_slice_2 = output.images[0, -3:, -3:, -1]

        assert np.abs(image_slice_1.flatten() - image_slice_2.flatten()).max() < 1e-4
@nightly
@require_onnxruntime
@require_torch_gpu
class OnnxStableDiffusionPipelineIntegrationTests(unittest.TestCase):
    @property
    def gpu_provider(self):
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000",  # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )

    @property
    def gpu_options(self):
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options
    def test_inference_default_pndm(self):
        sd_pipe = OnnxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4",
            revision="onnx",
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        np.random.seed(0)
        output = sd_pipe([prompt], guidance_scale=6.0, num_inference_steps=10, output_type="np")
        image = output.images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.0452, 0.0390, 0.0087, 0.0350, 0.0617, 0.0364, 0.0544, 0.0523, 0.0720])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
    def test_inference_ddim(self):
        ddim_scheduler = DDIMScheduler.from_pretrained(
            "runwayml/stable-diffusion-v1-5", subfolder="scheduler", revision="onnx"
        )
        sd_pipe = OnnxStableDiffusionPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5",
            revision="onnx",
            scheduler=ddim_scheduler,
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "open neural network exchange"
        generator = np.random.RandomState(0)
        output = sd_pipe([prompt], guidance_scale=7.5, num_inference_steps=10, generator=generator, output_type="np")
        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.2867, 0.1974, 0.1481, 0.7294, 0.7251, 0.6667, 0.4194, 0.5642, 0.6486])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_inference_k_lms(self):
        lms_scheduler = LMSDiscreteScheduler.from_pretrained(
            "runwayml/stable-diffusion-v1-5", subfolder="scheduler", revision="onnx"
        )
        sd_pipe = OnnxStableDiffusionPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5",
            revision="onnx",
            scheduler=lms_scheduler,
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "open neural network exchange"
        generator = np.random.RandomState(0)
        output = sd_pipe([prompt], guidance_scale=7.5, num_inference_steps=10, generator=generator, output_type="np")
        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.2306, 0.1959, 0.1593, 0.6549, 0.6394, 0.5408, 0.5065, 0.6010, 0.6161])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
    def test_intermediate_state(self):
        number_of_steps = 0

        def test_callback_fn(step: int, timestep: int, latents: np.ndarray) -> None:
            test_callback_fn.has_been_called = True
            nonlocal number_of_steps
            number_of_steps += 1
            if step == 0:
                assert latents.shape == (1, 4, 64, 64)
                latents_slice = latents[0, -3:, -3:, -1]
                expected_slice = np.array(
                    [-0.6772, -0.3835, -1.2456, 0.1905, -1.0974, 0.6967, -1.9353, 0.0178, 1.0167]
                )

                assert np.abs(latents_slice.flatten() - expected_slice).max() < 1e-3
            elif step == 5:
                assert latents.shape == (1, 4, 64, 64)
                latents_slice = latents[0, -3:, -3:, -1]
                expected_slice = np.array(
                    [-0.3351, 0.2241, -0.1837, -0.2325, -0.6577, 0.3393, -0.0241, 0.5899, 1.3875]
                )

                assert np.abs(latents_slice.flatten() - expected_slice).max() < 1e-3

        test_callback_fn.has_been_called = False

        pipe = OnnxStableDiffusionPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5",
            revision="onnx",
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "Andromeda galaxy in a bottle"

        generator = np.random.RandomState(0)
        pipe(
            prompt=prompt,
            num_inference_steps=5,
            guidance_scale=7.5,
            generator=generator,
            callback=test_callback_fn,
            callback_steps=1,
        )
        assert test_callback_fn.has_been_called
        assert number_of_steps == 6
    def test_stable_diffusion_no_safety_checker(self):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5",
            revision="onnx",
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        assert isinstance(pipe, OnnxStableDiffusionPipeline)
        assert pipe.safety_checker is None

        image = pipe("example prompt", num_inference_steps=2).images[0]
        assert image is not None

        # check that there's no error when saving a pipeline with one of the models being None
        with tempfile.TemporaryDirectory() as tmpdirname:
            pipe.save_pretrained(tmpdirname)
            pipe = OnnxStableDiffusionPipeline.from_pretrained(tmpdirname)

        # sanity check that the pipeline still works
        assert pipe.safety_checker is None
        image = pipe("example prompt", num_inference_steps=2).images[0]
        assert image is not None
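
# Minimal usage sketch (illustrative, not part of the original tests; it mirrors
# the checkpoints and arguments used above):
#   pipe = OnnxStableDiffusionPipeline.from_pretrained(
#       "runwayml/stable-diffusion-v1-5", revision="onnx", provider="CPUExecutionProvider"
#   )
#   image = pipe("a prompt", num_inference_steps=10, output_type="np").images[0]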
| 27
|
from __future__ import annotations
def depth_first_search(graph: dict, start: str) -> set:
    explored, stack = set(start), [start]

    while stack:
        v = stack.pop()
        explored.add(v)
        # Differences from BFS:
        # 1) pop last element instead of first one
        # 2) add adjacent elements to stack without exploring them
        for adj in reversed(graph[v]):
            if adj not in explored:
                stack.append(adj)
    return explored


G = {
    "A": ["B", "C", "D"],
    "B": ["A", "D", "E"],
    "C": ["A", "F"],
    "D": ["B", "D"],
    "E": ["B", "F"],
    "F": ["C", "E", "G"],
    "G": ["F"],
}

if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(depth_first_search(G, "A"))
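    # Hedged check (not in the original): the sample graph is connected, so DFS
    # from "A" reaches every vertex.
    assert depth_first_search(G, "A") == set("ABCDEFG")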
| 27
| 1
|
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import center_crop, normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
    import PIL
logger = logging.get_logger(__name__)


class ImageProcessor(BaseImageProcessor):
    # NOTE: the model-specific class name was mangled in this dump and could not be
    # recovered, so a generic name is used here. The structure below is the standard
    # HF image-processor layout that the original code clearly implements.
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample=PIL.Image.BICUBIC,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        rescale_factor: Union[int, float] = 1 / 255,
        do_rescale: bool = True,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 256, "width": 256}
        size = get_size_dict(size)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(self, image: np.ndarray, size: Dict[str, int], resample=PIL.Image.BICUBIC, data_format=None, **kwargs) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The size dictionary must have keys 'height' and 'width'. Got {size.keys()}")
        return resize(
            image, size=(size["height"], size["width"]), resample=resample, data_format=data_format, **kwargs
        )

    def center_crop(self, image: np.ndarray, size: Dict[str, int], data_format=None, **kwargs) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The size dictionary must have keys 'height' and 'width'. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(self, image: np.ndarray, scale: Union[int, float], data_format=None, **kwargs) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image: np.ndarray, mean, std, data_format=None, **kwargs) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample=None,
        do_center_crop: bool = None,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ):
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        size = size if size is not None else self.size
        size = get_size_dict(size)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None or resample is None:
            raise ValueError("Size and resample must be specified if do_resize is True.")

        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
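
# Minimal usage sketch (illustrative, not part of the module; the class name
# above is a stand-in for the lost model-specific name):
#   processor = ImageProcessor()
#   batch = processor(images=pil_image, return_tensors="pt")
#   batch["pixel_values"].shape  # -> (1, 3, 224, 224) with the defaults above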
| 274
|
import csv
from collections import defaultdict
from dataclasses import dataclass, field
from typing import List, Optional
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.ticker import ScalarFormatter
from transformers import HfArgumentParser
def snake_case_ ( __snake_case : Tuple=None , __snake_case : int=None) -> int:
return field(default_factory=lambda: default , metadata=__snake_case)
@dataclass
class __UpperCAmelCase :
__A : str = field(
metadata={'help': 'The csv file to plot.'} , )
__A : bool = field(
default=__a , metadata={'help': 'Whether to plot along batch size or sequence length. Defaults to sequence length.'} , )
__A : bool = field(
default=__a , metadata={'help': 'Whether the csv file has time results or memory results. Defaults to memory results.'} , )
__A : bool = field(
default=__a , metadata={'help': 'Disable logarithmic scale when plotting'} , )
__A : bool = field(
default=__a , metadata={
'help': 'Whether the csv file has training results or inference results. Defaults to inference results.'
} , )
__A : Optional[str] = field(
default=__a , metadata={'help': 'Filename under which the plot will be saved. If unused no plot is saved.'} , )
__A : Optional[List[str]] = list_field(
default=__a , metadata={'help': 'List of model names that are used instead of the ones in the csv file.'} )
def snake_case_ ( __snake_case : Optional[Any]) -> Dict:
try:
int(__snake_case)
return True
except ValueError:
return False
def snake_case_ ( __snake_case : Dict) -> int:
try:
float(__snake_case)
return True
except ValueError:
return False
class Plot:
    def __init__(self, args):
        self.args = args
        self.result_dict = defaultdict(lambda: {"bsz": [], "seq_len": [], "result": {}})

        with open(self.args.csv_file, newline="") as csv_file:
            reader = csv.DictReader(csv_file)
            for row in reader:
                model_name = row["model"]
                self.result_dict[model_name]["bsz"].append(int(row["batch_size"]))
                self.result_dict[model_name]["seq_len"].append(int(row["sequence_length"]))
                if can_convert_to_int(row["result"]):
                    # value is not None
                    self.result_dict[model_name]["result"][
                        (int(row["batch_size"]), int(row["sequence_length"]))
                    ] = int(row["result"])
                elif can_convert_to_float(row["result"]):
                    # value is not None
                    self.result_dict[model_name]["result"][
                        (int(row["batch_size"]), int(row["sequence_length"]))
                    ] = float(row["result"])

    def plot(self):
        fig, ax = plt.subplots()
        title_str = "Time usage" if self.args.is_time else "Memory usage"
        title_str = title_str + " for training" if self.args.is_train else title_str + " for inference"

        if not self.args.no_log_scale:
            # set logarithm scales
            ax.set_xscale("log")
            ax.set_yscale("log")

        for axis in [ax.xaxis, ax.yaxis]:
            axis.set_major_formatter(ScalarFormatter())

        for model_name_idx, model_name in enumerate(self.result_dict.keys()):
            batch_sizes = sorted(set(self.result_dict[model_name]["bsz"]))
            sequence_lengths = sorted(set(self.result_dict[model_name]["seq_len"]))
            results = self.result_dict[model_name]["result"]

            (x_axis_array, inner_loop_array) = (
                (batch_sizes, sequence_lengths) if self.args.plot_along_batch else (sequence_lengths, batch_sizes)
            )

            label_model_name = (
                model_name if self.args.short_model_names is None else self.args.short_model_names[model_name_idx]
            )

            for inner_loop_value in inner_loop_array:
                if self.args.plot_along_batch:
                    y_axis_array = np.asarray(
                        [results[(x, inner_loop_value)] for x in x_axis_array if (x, inner_loop_value) in results],
                        dtype=int,
                    )
                else:
                    y_axis_array = np.asarray(
                        [results[(inner_loop_value, x)] for x in x_axis_array if (inner_loop_value, x) in results],
                        dtype=np.float32,
                    )

                (x_axis_label, inner_loop_label) = (
                    ("batch_size", "len") if self.args.plot_along_batch else ("in #tokens", "bsz")
                )

                x_axis_array = np.asarray(x_axis_array, int)[: len(y_axis_array)]
                plt.scatter(
                    x_axis_array, y_axis_array, label=f"{label_model_name} - {inner_loop_label}: {inner_loop_value}"
                )
                plt.plot(x_axis_array, y_axis_array, "--")

            title_str += f" {label_model_name} vs."

        title_str = title_str[:-4]
        y_axis_label = "Time in s" if self.args.is_time else "Memory in MB"

        # plot
        plt.title(title_str)
        plt.xlabel(x_axis_label)
        plt.ylabel(y_axis_label)
        plt.legend()

        if self.args.figure_png_file is not None:
            plt.savefig(self.args.figure_png_file)
        else:
            plt.show()
def snake_case_ ( ) -> Tuple:
lowerCAmelCase_ = HfArgumentParser(__snake_case)
lowerCAmelCase_ = parser.parse_args_into_dataclasses()[0]
lowerCAmelCase_ = Plot(args=__snake_case)
plot.plot()
if __name__ == "__main__":
main()
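
# Example invocation (illustrative; the script and file names are placeholders):
#   python plot_csv_file.py --csv_file results.csv --figure_png_file plot.png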
| 274
| 1
|
import unittest
import numpy as np
from transformers import MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING, TF_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING
from transformers.pipelines import AudioClassificationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_torchaudio,
slow,
)
from .test_pipelines_common import ANY
@is_pipeline_test
class AudioClassificationPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING
    tf_model_mapping = TF_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING

    def get_test_pipeline(self, model, tokenizer, processor):
        audio_classifier = AudioClassificationPipeline(model=model, feature_extractor=processor)

        # test with a raw waveform
        audio = np.zeros((34000,))
        audio2 = np.zeros((14000,))
        return audio_classifier, [audio2, audio]

    def run_pipeline_test(self, audio_classifier, examples):
        audio2, audio = examples
        output = audio_classifier(audio)
        # by default a model is initialized with num_labels=2
        self.assertEqual(
            output,
            [
                {"score": ANY(float), "label": ANY(str)},
                {"score": ANY(float), "label": ANY(str)},
            ],
        )
        output = audio_classifier(audio, top_k=1)
        self.assertEqual(
            output,
            [
                {"score": ANY(float), "label": ANY(str)},
            ],
        )

        self.run_torchaudio(audio_classifier)

    @require_torchaudio
    def run_torchaudio(self, audio_classifier):
        import datasets

        # test with a local file
        dataset = datasets.load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        audio = dataset[0]["audio"]["array"]
        output = audio_classifier(audio)
        self.assertEqual(
            output,
            [
                {"score": ANY(float), "label": ANY(str)},
                {"score": ANY(float), "label": ANY(str)},
            ],
        )
    @require_torch
    def test_small_model_pt(self):
        model = "anton-l/wav2vec2-random-tiny-classifier"

        audio_classifier = pipeline("audio-classification", model=model)

        audio = np.ones((8000,))
        output = audio_classifier(audio, top_k=4)

        EXPECTED_OUTPUT = [
            {"score": 0.0842, "label": "no"},
            {"score": 0.0838, "label": "up"},
            {"score": 0.0837, "label": "go"},
            {"score": 0.0834, "label": "right"},
        ]
        EXPECTED_OUTPUT_PT_2 = [
            {"score": 0.0845, "label": "stop"},
            {"score": 0.0844, "label": "on"},
            {"score": 0.0841, "label": "right"},
            {"score": 0.0834, "label": "left"},
        ]
        self.assertIn(nested_simplify(output, decimals=4), [EXPECTED_OUTPUT, EXPECTED_OUTPUT_PT_2])

        audio_dict = {"array": np.ones((8000,)), "sampling_rate": audio_classifier.feature_extractor.sampling_rate}
        output = audio_classifier(audio_dict, top_k=4)
        self.assertIn(nested_simplify(output, decimals=4), [EXPECTED_OUTPUT, EXPECTED_OUTPUT_PT_2])

    @require_torch
    @slow
    def test_large_model_pt(self):
        import datasets

        model = "superb/wav2vec2-base-superb-ks"

        audio_classifier = pipeline("audio-classification", model=model)
        dataset = datasets.load_dataset("anton-l/superb_dummy", "ks", split="test")

        audio = np.array(dataset[3]["speech"], dtype=np.float32)
        output = audio_classifier(audio, top_k=4)
        self.assertEqual(
            nested_simplify(output, decimals=3),
            [
                {"score": 0.981, "label": "go"},
                {"score": 0.007, "label": "up"},
                {"score": 0.006, "label": "_unknown_"},
                {"score": 0.001, "label": "down"},
            ],
        )

    @require_tf
    @unittest.skip("Audio classification is not implemented for TF")
    def test_small_model_tf(self):
        pass
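
# Minimal usage sketch (illustrative, not part of the original tests; it mirrors
# the small-model test above):
#   classifier = pipeline("audio-classification", model="anton-l/wav2vec2-random-tiny-classifier")
#   classifier(np.ones((8000,)), top_k=4)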
| 110
|
from math import factorial
class Dual:
    def __init__(self, real, rank):
        self.real = real
        if isinstance(rank, int):
            self.duals = [1] * rank
        else:
            self.duals = rank

    def __repr__(self):
        return (
            f"{self.real}+"
            f"{'+'.join(str(dual) + 'E' + str(n + 1) for n, dual in enumerate(self.duals))}"
        )

    def reduce(self):
        cur = self.duals.copy()
        while cur[-1] == 0:
            cur.pop(-1)
        return Dual(self.real, cur)

    def __add__(self, other):
        if not isinstance(other, Dual):
            return Dual(self.real + other, self.duals)

        s_dual = self.duals.copy()
        o_dual = other.duals.copy()
        if len(s_dual) > len(o_dual):
            o_dual.extend([1] * (len(s_dual) - len(o_dual)))
        elif len(s_dual) < len(o_dual):
            s_dual.extend([1] * (len(o_dual) - len(s_dual)))

        new_duals = []
        for i in range(len(s_dual)):
            new_duals.append(s_dual[i] + o_dual[i])
        return Dual(self.real + other.real, new_duals)

    __radd__ = __add__

    def __sub__(self, other):
        return self + other * -1

    def __mul__(self, other):
        if not isinstance(other, Dual):
            new_duals = []
            for i in self.duals:
                new_duals.append(i * other)
            return Dual(self.real * other, new_duals)

        new_duals = [0] * (len(self.duals) + len(other.duals) + 1)
        for i, item in enumerate(self.duals):
            for j, jtem in enumerate(other.duals):
                new_duals[i + j + 1] += item * jtem
        for k in range(len(self.duals)):
            new_duals[k] += self.duals[k] * other.real
        for index in range(len(other.duals)):
            new_duals[index] += other.duals[index] * self.real
        return Dual(self.real * other.real, new_duals)

    __rmul__ = __mul__

    def __truediv__(self, other):
        if not isinstance(other, Dual):
            new_duals = []
            for i in self.duals:
                new_duals.append(i / other)
            return Dual(self.real / other, new_duals)
        raise ValueError

    def __floordiv__(self, other):
        if not isinstance(other, Dual):
            new_duals = []
            for i in self.duals:
                new_duals.append(i // other)
            return Dual(self.real // other, new_duals)
        raise ValueError

    def __pow__(self, n):
        if n < 0 or isinstance(n, float):
            raise ValueError("power must be a positive integer")
        if n == 0:
            return 1
        if n == 1:
            return self
        x = self
        for _ in range(n - 1):
            x *= self
        return x
def snake_case ( UpperCAmelCase : Optional[int], UpperCAmelCase : List[Any], UpperCAmelCase : int ):
if not callable(UpperCAmelCase ):
raise ValueError('differentiate() requires a function as input for func' )
if not isinstance(UpperCAmelCase, (float, int) ):
raise ValueError('differentiate() requires a float as input for position' )
if not isinstance(UpperCAmelCase, UpperCAmelCase ):
raise ValueError('differentiate() requires an int as input for order' )
A = Dual(UpperCAmelCase, 1 )
A = func(UpperCAmelCase )
if order == 0:
return result.real
return result.duals[order - 1] * factorial(UpperCAmelCase )
if __name__ == "__main__":
import doctest
doctest.testmod()
def snake_case ( UpperCAmelCase : int ):
return y**2 * y**4
print(differentiate(f, 9, 2))
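    # Added sanity checks (hand-verified, illustrative only): for f(y) = y**6,
    # f(9) = 9**6 = 531441 and f'(9) = 6 * 9**5 = 354294.
    assert differentiate(f, 9, 0) == 531441
    assert differentiate(f, 9, 1) == 354294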
| 110
| 1
|
"""simple docstring"""
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
'configuration_efficientnet': [
'EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP',
'EfficientNetConfig',
'EfficientNetOnnxConfig',
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["image_processing_efficientnet"] = ["EfficientNetImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_efficientnet"] = [
'EFFICIENTNET_PRETRAINED_MODEL_ARCHIVE_LIST',
'EfficientNetForImageClassification',
'EfficientNetModel',
'EfficientNetPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_efficientnet import (
EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
EfficientNetConfig,
EfficientNetOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_efficientnet import EfficientNetImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_efficientnet import (
EFFICIENTNET_PRETRAINED_MODEL_ARCHIVE_LIST,
EfficientNetForImageClassification,
EfficientNetModel,
EfficientNetPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 169
|
"""simple docstring"""
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
    if not (is_transformers_available() and is_torch_available() and is_transformers_version(">=", "4.25.0")):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import ShapEPipeline
else:
from .camera import create_pan_cameras
from .pipeline_shap_e import ShapEPipeline
from .pipeline_shap_e_img2img import ShapEImgaImgPipeline
from .renderer import (
BoundingBoxVolume,
ImportanceRaySampler,
MLPNeRFModelOutput,
MLPNeRSTFModel,
ShapEParamsProjModel,
ShapERenderer,
StratifiedRaySampler,
VoidNeRFModel,
)
| 169
| 1
|
import os
from shutil import copyfile
from typing import List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "google/rembert": "https://huggingface.co/google/rembert/resolve/main/sentencepiece.model",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/rembert": 256,
}
class RemBertTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__(
        self,
        vocab_file,
        do_lower_case=False,
        remove_space=True,
        keep_accents=True,
        bos_token="[CLS]",
        eos_token="[SEP]",
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        **kwargs,
    ):
        super().__init__(
            do_lower_case=do_lower_case, remove_space=remove_space, keep_accents=keep_accents,
            bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token,
            pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, **kwargs,
        )
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor()
        self.sp_model.Load(vocab_file)

    @property
    def vocab_size(self):
        return len(self.sp_model)

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        self.sp_model = spm.SentencePieceProcessor()
        self.sp_model.Load(self.vocab_file)

    def _tokenize(self, text, sample=False):
        pieces = self.sp_model.EncodeAsPieces(text)
        return pieces

    def _convert_token_to_id(self, token):
        return self.sp_model.PieceToId(token)

    def _convert_id_to_token(self, index):
        return self.sp_model.IdToPiece(index)

    def convert_tokens_to_string(self, tokens):
        out_string = self.sp_model.decode_pieces(tokens)
        return out_string

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    "You should not supply a second sequence if the provided sequence of "
                    "ids is already formatted with special tokens for the model."
                )
            return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_0]
        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error("Vocabulary path ({}) should be a directory".format(save_directory))
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
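# Illustrative usage (a sketch; the SentencePiece model path below is hypothetical
# and must point to a real file):
#   tokenizer = RemBertTokenizer(vocab_file="sentencepiece.model")
#   pieces = tokenizer._tokenize("Hello world")
#   ids = [tokenizer._convert_token_to_id(piece) for piece in pieces]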
| 214
|
import argparse
import json
from typing import List
from ltp import LTP
from transformers import BertTokenizer
def _is_chinese_char(cp):
    """Checks whether `cp` is the codepoint of a CJK character."""
    if (
        (cp >= 0x4E00 and cp <= 0x9FFF)
        or (cp >= 0x3400 and cp <= 0x4DBF)  #
        or (cp >= 0x20000 and cp <= 0x2A6DF)  #
        or (cp >= 0x2A700 and cp <= 0x2B73F)  #
        or (cp >= 0x2B740 and cp <= 0x2B81F)  #
        or (cp >= 0x2B820 and cp <= 0x2CEAF)  #
        or (cp >= 0xF900 and cp <= 0xFAFF)
        or (cp >= 0x2F800 and cp <= 0x2FA1F)  #
    ):  #
        return True
    return False


def is_chinese(word: str):
    for char in word:
        char_code = ord(char)
        if not _is_chinese_char(char_code):
            return 0
    return 1


def get_chinese_word(tokens: List[str]):
    word_set = set()
    for token in tokens:
        chinese_word = len(token) > 1 and is_chinese(token)
        if chinese_word:
            word_set.add(token)
    word_list = list(word_set)
    return word_list


def add_sub_symbol(bert_tokens: List[str], chinese_word_set: set):
    if not chinese_word_set:
        return bert_tokens
    max_word_len = max([len(w) for w in chinese_word_set])

    bert_word = bert_tokens
    start, end = 0, len(bert_word)
    while start < end:
        single_word = True
        if is_chinese(bert_word[start]):
            l = min(end - start, max_word_len)
            for i in range(l, 1, -1):
                whole_word = "".join(bert_word[start : start + i])
                if whole_word in chinese_word_set:
                    for j in range(start + 1, start + i):
                        bert_word[j] = "##" + bert_word[j]
                    start = start + i
                    single_word = False
                    break
        if single_word:
            start += 1
    return bert_word


def prepare_ref(lines: List[str], ltp_tokenizer: LTP, bert_tokenizer: BertTokenizer):
    ltp_res = []
    for i in range(0, len(lines), 100):
        res = ltp_tokenizer.seg(lines[i : i + 100])[0]
        res = [get_chinese_word(r) for r in res]
        ltp_res.extend(res)
    assert len(ltp_res) == len(lines)

    bert_res = []
    for i in range(0, len(lines), 100):
        res = bert_tokenizer(lines[i : i + 100], add_special_tokens=True, truncation=True, max_length=512)
        bert_res.extend(res["input_ids"])
    assert len(bert_res) == len(lines)

    ref_ids = []
    for input_ids, chinese_word in zip(bert_res, ltp_res):
        input_tokens = []
        for id in input_ids:
            token = bert_tokenizer._convert_id_to_token(id)
            input_tokens.append(token)
        input_tokens = add_sub_symbol(input_tokens, chinese_word)
        ref_id = []
        # We only save pos of chinese subwords start with ##, which mean is part of a whole word.
        for i, token in enumerate(input_tokens):
            if token[:2] == "##":
                clean_token = token[2:]
                # save chinese tokens' pos
                if len(clean_token) == 1 and _is_chinese_char(ord(clean_token)):
                    ref_id.append(i)
        ref_ids.append(ref_id)
    assert len(ref_ids) == len(bert_res)
    return ref_ids


def main(args):
    with open(args.file_name, "r", encoding="utf-8") as f:
        data = f.readlines()
    lines = [line.strip() for line in data if len(line) > 0 and not line.isspace()]  # avoid delimiter like '\u2029'
    ltp_tokenizer = LTP(args.ltp)  # faster in GPU device
    bert_tokenizer = BertTokenizer.from_pretrained(args.bert)

    ref_ids = prepare_ref(lines, ltp_tokenizer, bert_tokenizer)

    with open(args.save_path, "w", encoding="utf-8") as f:
        data = [json.dumps(ref) + "\n" for ref in ref_ids]
        f.writelines(data)
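# Added note (illustrative): each line of `save_path` holds a JSON list such as
# "[3, 5, 6]" -- the positions of "##"-continuation subwords that sit inside a
# whole Chinese word, typically consumed as `chinese_ref` for whole word masking.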
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="prepare_chinese_ref")
parser.add_argument(
'--file_name',
type=str,
default='./resources/chinese-demo.txt',
help='file need process, same as training data in lm',
)
parser.add_argument(
'--ltp', type=str, default='./resources/ltp', help='resources for LTP tokenizer, usually a path'
)
parser.add_argument('--bert', type=str, default='./resources/robert', help='resources for Bert tokenizer')
parser.add_argument('--save_path', type=str, default='./resources/ref.txt', help='path to save res')
    args = parser.parse_args()
main(args)
| 214
| 1
|
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_mvp import MvpTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}

# See all MVP models at https://huggingface.co/models?filter=mvp
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "RUCAIBox/mvp": "https://huggingface.co/RUCAIBox/mvp/resolve/main/vocab.json",
    },
    "added_tokens.json": {
        "RUCAIBox/mvp": "https://huggingface.co/RUCAIBox/mvp/resolve/main/added_tokens.json",
    },
    "merges_file": {
        "RUCAIBox/mvp": "https://huggingface.co/RUCAIBox/mvp/resolve/main/merges.txt",
    },
    "tokenizer_file": {
        "RUCAIBox/mvp": "https://huggingface.co/RUCAIBox/mvp/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "RUCAIBox/mvp": 1024,
}
class MvpTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = MvpTokenizer

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        trim_offsets=True,
        **kwargs,
    ):
        super().__init__(
            vocab_file, merges_file, tokenizer_file=tokenizer_file, errors=errors, bos_token=bos_token,
            eos_token=eos_token, sep_token=sep_token, cls_token=cls_token, unk_token=unk_token,
            pad_token=pad_token, mask_token=mask_token, add_prefix_space=add_prefix_space,
            trim_offsets=trim_offsets, **kwargs,
        )
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)
        self.add_prefix_space = add_prefix_space

        # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
        tokenizer_component = "post_processor"
        tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None)
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__())

            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                state["sep"] = tuple(state["sep"])
            if "cls" in state:
                state["cls"] = tuple(state["cls"])

            changes_to_apply = False
            if state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
                state["add_prefix_space"] = add_prefix_space
                changes_to_apply = True
            if state.get("trim_offsets", trim_offsets) != trim_offsets:
                state["trim_offsets"] = trim_offsets
                changes_to_apply = True
            if changes_to_apply:
                component_class = getattr(processors, state.pop("type"))
                new_value = component_class(**state)
                setattr(self.backend_tokenizer, tokenizer_component, new_value)

    @property
    def mask_token(self) -> str:
        if self._mask_token is None:
            if self.verbose:
                logger.error("Using mask_token, but it is not set yet.")
            return None
        return str(self._mask_token)

    @mask_token.setter
    def mask_token(self, value):
        mask_token = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value
        self._mask_token = mask_token

    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
                "to use it with pretokenized inputs."
            )
        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
                "to use it with pretokenized inputs."
            )
        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output
        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
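# Illustrative usage (a sketch; "RUCAIBox/mvp" is the checkpoint id from the map
# above, but loading it requires network access):
#   tokenizer = MvpTokenizerFast.from_pretrained("RUCAIBox/mvp")
#   batch = tokenizer(["Summarize this text."], return_tensors="pt")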
| 146
|
from __future__ import annotations
import numpy as np
def lower_upper_decomposition(table: np.ndarray) -> tuple[np.ndarray, np.ndarray]:
    """Doolittle LU decomposition of a square matrix (no pivoting)."""
    rows, columns = np.shape(table)
    if rows != columns:
        msg = (
            "'table' has to be of square shaped array but got a "
            f"{rows}x{columns} array:\n{table}"
        )
        raise ValueError(msg)

    lower = np.zeros((rows, columns))
    upper = np.zeros((rows, columns))
    for i in range(columns):
        for j in range(i):
            total = sum(lower[i][k] * upper[k][j] for k in range(j))
            if upper[j][j] == 0:
                raise ArithmeticError("No LU decomposition exists")
            lower[i][j] = (table[i][j] - total) / upper[j][j]
        lower[i][i] = 1
        for j in range(i, columns):
            total = sum(lower[i][k] * upper[k][j] for k in range(i))
            upper[i][j] = table[i][j] - total
    return lower, upper
if __name__ == "__main__":
import doctest
doctest.testmod()
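    # Added example (hand-checked, illustrative only): the product of the two
    # factors should reconstruct the input matrix.
    matrix = np.array([[2, -2, 1], [0, 1, 2], [5, 3, 1]])
    lower, upper = lower_upper_decomposition(matrix)
    assert np.allclose(lower @ upper, matrix)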
| 146
| 1
|
import warnings
from contextlib import contextmanager
from ....processing_utils import ProcessorMixin
class MCTCTProcessor(ProcessorMixin):
    """Wraps an MCTCT feature extractor and a tokenizer into a single processor."""

    feature_extractor_class = "MCTCTFeatureExtractor"
    tokenizer_class = "AutoTokenizer"

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False

    def __call__(self, *args, **kwargs):
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)

        if "raw_speech" in kwargs:
            warnings.warn("Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.")
            audio = kwargs.pop("raw_speech")
        else:
            audio = kwargs.pop("audio", None)
        sampling_rate = kwargs.pop("sampling_rate", None)
        text = kwargs.pop("text", None)
        if len(args) > 0:
            audio = args[0]
            args = args[1:]

        if audio is None and text is None:
            raise ValueError("You need to specify either an `audio` or `text` input to process.")

        if audio is not None:
            inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)
        if text is not None:
            encodings = self.tokenizer(text, **kwargs)

        if text is None:
            return inputs
        elif audio is None:
            return encodings
        else:
            inputs["labels"] = encodings["input_ids"]
            return inputs

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def pad(self, *args, **kwargs):
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor.pad(*args, **kwargs)

        input_features = kwargs.pop("input_features", None)
        labels = kwargs.pop("labels", None)
        if len(args) > 0:
            input_features = args[0]
            args = args[1:]

        if input_features is not None:
            input_features = self.feature_extractor.pad(input_features, *args, **kwargs)
        if labels is not None:
            labels = self.tokenizer.pad(labels, **kwargs)

        if labels is None:
            return input_features
        elif input_features is None:
            return labels
        else:
            input_features["labels"] = labels["input_ids"]
            return input_features

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @contextmanager
    def as_target_processor(self):
        warnings.warn(
            "`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your "
            "labels by using the argument `text` of the regular `__call__` method (either in the same call as "
            "your audio inputs, or in a separate call."
        )
        self._in_target_context_manager = True
        self.current_processor = self.tokenizer
        yield
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
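# Illustrative usage (a sketch; loading the checkpoint requires network access,
# and `waveform`/`transcript` are hypothetical inputs):
#   processor = MCTCTProcessor.from_pretrained("speechbrain/m-ctc-t-large")
#   inputs = processor(audio=waveform, sampling_rate=16_000, text=transcript)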
| 151
|
# Usage:
# ./gen-card-allenai-wmt16.py
import os
from pathlib import Path
def write_model_card(model_card_dir, src_lang, tgt_lang, model_name):
    texts = {
        "en": "Machine learning is great, isn't it?",
        "ru": "Машинное обучение - это здорово, не так ли?",
        "de": "Maschinelles Lernen ist großartig, nicht wahr?",
    }

    # BLUE scores as follows:
    # "pair": [fairseq, transformers]
    scores = {
        "wmt16-en-de-dist-12-1": [28.3, 27.52],
        "wmt16-en-de-dist-6-1": [27.4, 27.11],
        "wmt16-en-de-12-1": [26.9, 25.75],
    }
    pair = f"{src_lang}-{tgt_lang}"

    readme = f'''
---
language:
- {src_lang}
- {tgt_lang}
thumbnail:
tags:
- translation
- wmt16
- allenai
license: apache-2.0
datasets:
- wmt16
metrics:
- bleu
---
# FSMT
## Model description
This is a ported version of fairseq-based [wmt16 transformer](https://github.com/jungokasai/deep-shallow/) for {src_lang}-{tgt_lang}.
For more details, please, see [Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation](https://arxiv.org/abs/2006.10369).
All 3 models are available:
* [wmt16-en-de-dist-12-1](https://huggingface.co/allenai/wmt16-en-de-dist-12-1)
* [wmt16-en-de-dist-6-1](https://huggingface.co/allenai/wmt16-en-de-dist-6-1)
* [wmt16-en-de-12-1](https://huggingface.co/allenai/wmt16-en-de-12-1)
## Intended uses & limitations
#### How to use
```python
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
mname = "allenai/{model_name}"
tokenizer = FSMTTokenizer.from_pretrained(mname)
model = FSMTForConditionalGeneration.from_pretrained(mname)
input = "{texts[src_lang]}"
input_ids = tokenizer.encode(input, return_tensors="pt")
outputs = model.generate(input_ids)
decoded = tokenizer.decode(outputs[0], skip_special_tokens=True)
print(decoded) # {texts[tgt_lang]}
```
#### Limitations and bias
## Training data
Pretrained weights were left identical to the original model released by allenai. For more details, please, see the [paper](https://arxiv.org/abs/2006.10369).
## Eval results
Here are the BLEU scores:
model | fairseq | transformers
-------|---------|----------
{model_name} | {scores[model_name][0]} | {scores[model_name][1]}
The score is slightly below the score reported in the paper, as the researchers don\'t use `sacrebleu` and measure the score on tokenized outputs. `transformers` score was measured using `sacrebleu` on detokenized outputs.
The score was calculated using this code:
```bash
git clone https://github.com/huggingface/transformers
cd transformers
export PAIR={pair}
export DATA_DIR=data/$PAIR
export SAVE_DIR=data/$PAIR
export BS=8
export NUM_BEAMS=5
mkdir -p $DATA_DIR
sacrebleu -t wmt16 -l $PAIR --echo src > $DATA_DIR/val.source
sacrebleu -t wmt16 -l $PAIR --echo ref > $DATA_DIR/val.target
echo $PAIR
PYTHONPATH="src:examples/seq2seq" python examples/seq2seq/run_eval.py allenai/{model_name} $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS
```
## Data Sources
- [training, etc.](http://www.statmt.org/wmt16/)
- [test set](http://matrix.statmt.org/test_sets/newstest2016.tgz?1504722372)
### BibTeX entry and citation info
```
@misc{{kasai2020deep,
title={{Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation}},
author={{Jungo Kasai and Nikolaos Pappas and Hao Peng and James Cross and Noah A. Smith}},
year={{2020}},
eprint={{2006.10369}},
archivePrefix={{arXiv}},
primaryClass={{cs.CL}}
}}
```
'''
    model_card_dir.mkdir(parents=True, exist_ok=True)
    path = os.path.join(model_card_dir, "README.md")
    print(f"Generating {path}")
    with open(path, "w", encoding="utf-8") as f:
        f.write(readme)


# make sure we are under the root of the project
repo_dir = Path(__file__).resolve().parent.parent.parent
model_cards_dir = repo_dir / "model_cards"

for model_name in ["wmt16-en-de-dist-12-1", "wmt16-en-de-dist-6-1", "wmt16-en-de-12-1"]:
    model_card_dir = model_cards_dir / "allenai" / model_name
    write_model_card(model_card_dir, src_lang="en", tgt_lang="de", model_name=model_name)
| 151
| 1
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"configuration_blenderbot_small": [
"BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP",
"BlenderbotSmallConfig",
"BlenderbotSmallOnnxConfig",
],
"tokenization_blenderbot_small": ["BlenderbotSmallTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A = ["BlenderbotSmallTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_blenderbot_small"] = [
"BLENDERBOT_SMALL_PRETRAINED_MODEL_ARCHIVE_LIST",
"BlenderbotSmallForCausalLM",
"BlenderbotSmallForConditionalGeneration",
"BlenderbotSmallModel",
"BlenderbotSmallPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_blenderbot_small"] = [
"TFBlenderbotSmallForConditionalGeneration",
"TFBlenderbotSmallModel",
"TFBlenderbotSmallPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_blenderbot_small"] = [
"FlaxBlenderbotSmallForConditionalGeneration",
"FlaxBlenderbotSmallModel",
"FlaxBlenderbotSmallPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_blenderbot_small import (
BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP,
BlenderbotSmallConfig,
BlenderbotSmallOnnxConfig,
)
from .tokenization_blenderbot_small import BlenderbotSmallTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_blenderbot_small_fast import BlenderbotSmallTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blenderbot_small import (
BLENDERBOT_SMALL_PRETRAINED_MODEL_ARCHIVE_LIST,
BlenderbotSmallForCausalLM,
BlenderbotSmallForConditionalGeneration,
BlenderbotSmallModel,
BlenderbotSmallPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blenderbot_small import (
TFBlenderbotSmallForConditionalGeneration,
TFBlenderbotSmallModel,
TFBlenderbotSmallPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_blenderbot_small import (
FlaxBlenderbotSmallForConditionalGeneration,
FlaxBlenderbotSmallModel,
FlaxBlenderbotSmallPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 59
|
import operator as op
def solve(post_fix):
    """Evaluates a space-separated postfix expression, printing each step."""
    stack = []
    div = lambda x, y: int(x / y)  # noqa: E731 integer division operation
    opr = {
        "^": op.pow,
        "*": op.mul,
        "/": div,
        "+": op.add,
        "-": op.sub,
    }  # operators & their respective operation

    # print table header
    print("Symbol".center(8), "Action".center(12), "Stack", sep=" | ")
    print("-" * (30 + len(post_fix)))

    for x in post_fix:
        if x.isdigit():  # if x is a digit
            stack.append(x)  # append x to stack
            # output in tabular format
            print(x.rjust(8), ("push(" + x + ")").ljust(12), ",".join(stack), sep=" | ")
        else:
            b = stack.pop()  # pop stack
            # output in tabular format
            print("".rjust(8), ("pop(" + b + ")").ljust(12), ",".join(stack), sep=" | ")

            a = stack.pop()  # pop stack
            # output in tabular format
            print("".rjust(8), ("pop(" + a + ")").ljust(12), ",".join(stack), sep=" | ")

            stack.append(
                str(opr[x](int(a), int(b)))
            )  # evaluate the 2 values popped from stack & push result to stack
            # output in tabular format
            print(
                x.rjust(8), ("push(" + a + x + b + ")").ljust(12), ",".join(stack), sep=" | ",
            )
    return int(stack[0])
if __name__ == "__main__":
    Postfix = input("\n\nEnter a Postfix Equation (space separated) = ").split(" ")
print("\n\tResult = ", solve(Postfix))
| 59
| 1
|
import unittest
from transformers import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING, is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
    class Image:
        @staticmethod
        def open(*args, **kwargs):
            pass
@is_pipeline_test
@require_torch
@require_vision
class VisualQuestionAnsweringPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING

    def get_test_pipeline(self, model, tokenizer, processor):
        vqa_pipeline = pipeline("visual-question-answering", model="hf-internal-testing/tiny-vilt-random-vqa")
        examples = [
            {
                "image": Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png"),
                "question": "How many cats are there?",
            },
            {
                "image": "./tests/fixtures/tests_samples/COCO/000000039769.png",
                "question": "How many cats are there?",
            },
        ]
        return vqa_pipeline, examples

    def run_pipeline_test(self, vqa_pipeline, examples):
        outputs = vqa_pipeline(examples, top_k=1)
        self.assertEqual(
            outputs,
            [
                [{"score": ANY(float), "answer": ANY(str)}],
                [{"score": ANY(float), "answer": ANY(str)}],
            ],
        )

    @require_torch
    def test_small_model_pt(self):
        vqa_pipeline = pipeline("visual-question-answering", model="hf-internal-testing/tiny-vilt-random-vqa")
        image = "./tests/fixtures/tests_samples/COCO/000000039769.png"
        question = "How many cats are there?"

        outputs = vqa_pipeline(image=image, question="How many cats are there?", top_k=2)
        self.assertEqual(
            outputs, [{"score": ANY(float), "answer": ANY(str)}, {"score": ANY(float), "answer": ANY(str)}]
        )

        outputs = vqa_pipeline({"image": image, "question": question}, top_k=2)
        self.assertEqual(
            outputs, [{"score": ANY(float), "answer": ANY(str)}, {"score": ANY(float), "answer": ANY(str)}]
        )

    @slow
    @require_torch
    def test_large_model_pt(self):
        vqa_pipeline = pipeline("visual-question-answering", model="dandelin/vilt-b32-finetuned-vqa")
        image = "./tests/fixtures/tests_samples/COCO/000000039769.png"
        question = "How many cats are there?"

        outputs = vqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4), [{"score": 0.8799, "answer": "2"}, {"score": 0.296, "answer": "1"}]
        )

        outputs = vqa_pipeline({"image": image, "question": question}, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4), [{"score": 0.8799, "answer": "2"}, {"score": 0.296, "answer": "1"}]
        )

        outputs = vqa_pipeline(
            [{"image": image, "question": question}, {"image": image, "question": question}], top_k=2
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [[{"score": 0.8799, "answer": "2"}, {"score": 0.296, "answer": "1"}]] * 2,
        )

    @require_tf
    @unittest.skip("Visual question answering not implemented in TF")
    def test_small_model_tf(self):
        pass
| 709
|
from typing import Any
import numpy as np
def is_hermitian(matrix: np.ndarray) -> bool:
    """Checks whether a matrix equals its own conjugate transpose."""
    return np.array_equal(matrix, matrix.conjugate().T)


def rayleigh_quotient(a: np.ndarray, v: np.ndarray) -> Any:
    """Returns the Rayleigh quotient of Hermitian matrix `a` and vector `v`."""
    v_star = v.conjugate().T
    v_star_dot = v_star.dot(a)
    assert isinstance(v_star_dot, np.ndarray)
    return (v_star_dot.dot(v)) / (v_star.dot(v))


def tests() -> None:
    a = np.array([[2, 2 + 1j, 4], [2 - 1j, 3, 1j], [4, -1j, 1]])
    v = np.array([[1], [2], [3]])
    assert is_hermitian(a), f"{a} is not hermitian."
    print(rayleigh_quotient(a, v))

    a = np.array([[1, 2, 4], [2, 3, -1], [4, -1, 1]])
    assert is_hermitian(a), f"{a} is not hermitian."
    assert rayleigh_quotient(a, v) == float(3)
if __name__ == "__main__":
import doctest
doctest.testmod()
tests()
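    # Added check (hand-verified, illustrative only): for a diagonal Hermitian
    # matrix, the Rayleigh quotient of a basis vector is the diagonal entry.
    a = np.array([[2, 0], [0, 3]])
    v = np.array([[1], [0]])
    assert is_hermitian(a), f"{a} is not hermitian."
    assert rayleigh_quotient(a, v) == float(2)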
| 316
| 0
|
from __future__ import annotations
import requests
valid_terms = set(
'approved_at_utc approved_by author_flair_background_color\nauthor_flair_css_class author_flair_richtext author_flair_template_id author_fullname\nauthor_premium can_mod_post category clicked content_categories created_utc downs\nedited gilded gildings hidden hide_score is_created_from_ads_ui is_meta\nis_original_content is_reddit_media_domain is_video link_flair_css_class\nlink_flair_richtext link_flair_text link_flair_text_color media_embed mod_reason_title\nname permalink pwls quarantine saved score secure_media secure_media_embed selftext\nsubreddit subreddit_name_prefixed subreddit_type thumbnail title top_awarded_type\ntotal_awards_received ups upvote_ratio url user_reports'.split()
)
def get_subreddit_data(
    subreddit: str, limit: int = 1, age: str = "new", wanted_data: list | None = None
) -> dict:
    """Fetches `limit` posts from a subreddit, optionally restricted to `wanted_data` fields."""
    wanted_data = wanted_data or []
    if invalid_search_terms := ", ".join(sorted(set(wanted_data) - valid_terms)):
        msg = f"Invalid search term: {invalid_search_terms}"
        raise ValueError(msg)
    response = requests.get(
        f"https://reddit.com/r/{subreddit}/{age}.json?limit={limit}",
        headers={"User-agent": "A random string"},
    )
    if response.status_code == 429:
        raise requests.HTTPError

    data = response.json()
    if not wanted_data:
        return {id_: data["data"]["children"][id_] for id_ in range(limit)}

    data_dict = {}
    for id_ in range(limit):
        data_dict[id_] = {
            item: data["data"]["children"][id_]["data"][item] for item in wanted_data
        }
    return data_dict
if __name__ == "__main__":
# If you get Error 429, that means you are rate limited.Try after some time
print(get_subreddit_data('learnpython', wanted_data=['title', 'url', 'selftext']))
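    # Added note (illustrative): the call above returns a dict keyed by post
    # index, e.g. {0: {"title": "...", "url": "...", "selftext": "..."}}.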
| 217
|
from __future__ import annotations
def generate_sum_of_subsets_soln(nums: list[int], max_sum: int) -> list[list[int]]:
    result: list[list[int]] = []
    path: list[int] = []
    num_index = 0
    remaining_nums_sum = sum(nums)
    create_state_space_tree(nums, max_sum, num_index, path, result, remaining_nums_sum)
    return result


def create_state_space_tree(
    nums: list[int],
    max_sum: int,
    num_index: int,
    path: list[int],
    result: list[list[int]],
    remaining_nums_sum: int,
) -> None:
    if sum(path) > max_sum or (remaining_nums_sum + sum(path)) < max_sum:
        return
    if sum(path) == max_sum:
        result.append(path)
        return
    for index in range(num_index, len(nums)):
        create_state_space_tree(
            nums, max_sum, index + 1, [*path, nums[index]], result, remaining_nums_sum - nums[index],
        )
nums = [3, 34, 4, 12, 5, 2]
max_sum = 9
result = generate_sum_of_subsets_soln(nums, max_sum)
print(*result)
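# Added check (hand-verified): the subsets of [3, 34, 4, 12, 5, 2] summing to 9
# are [3, 4, 2] and [4, 5].
assert sorted(sorted(subset) for subset in result) == [[2, 3, 4], [4, 5]]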
| 217
| 1
|
import random
import unittest
from torch.utils.data import BatchSampler, DataLoader, IterableDataset
from accelerate import Accelerator
from accelerate.data_loader import (
BatchSamplerShard,
DataLoaderDispatcher,
DataLoaderShard,
IterableDatasetShard,
SkipBatchSampler,
SkipDataLoader,
skip_first_batches,
)
class __lowerCAmelCase( lowerCAmelCase__ ):
def __init__( self : Optional[Any] , SCREAMING_SNAKE_CASE : str=0.01 , SCREAMING_SNAKE_CASE : Dict=1_000 ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ :Dict = p_stop
SCREAMING_SNAKE_CASE_ :List[Any] = max_length
def __iter__( self : Optional[int] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ :Tuple = 0
SCREAMING_SNAKE_CASE_ :Optional[int] = False
while not stop and count < self.max_length:
yield count
count += 1
SCREAMING_SNAKE_CASE_ :Tuple = random.random() < self.p_stop
class __lowerCAmelCase( unittest.TestCase ):
def _lowercase ( self : Any , SCREAMING_SNAKE_CASE : Dict , SCREAMING_SNAKE_CASE : Tuple , SCREAMING_SNAKE_CASE : Tuple=False , SCREAMING_SNAKE_CASE : Tuple=True ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ :Optional[Any] = [
BatchSamplerShard(SCREAMING_SNAKE_CASE , 2 , SCREAMING_SNAKE_CASE , split_batches=SCREAMING_SNAKE_CASE , even_batches=SCREAMING_SNAKE_CASE )
for i in range(2 )
]
SCREAMING_SNAKE_CASE_ :List[str] = [list(SCREAMING_SNAKE_CASE ) for batch_sampler_shard in batch_sampler_shards]
if not split_batches:
self.assertListEqual([len(SCREAMING_SNAKE_CASE ) for shard in batch_sampler_shards] , [len(SCREAMING_SNAKE_CASE ) for e in expected] )
self.assertListEqual(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
def _lowercase ( self : Union[str, Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ :str = BatchSampler(range(24 ) , batch_size=3 , drop_last=SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ :List[str] = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]],
]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ :Union[str, Any] = BatchSampler(range(24 ) , batch_size=3 , drop_last=SCREAMING_SNAKE_CASE )
# Expected shouldn't change
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
# Check the shards when the dataset is a round multiple of batch size but not total batch size.
SCREAMING_SNAKE_CASE_ :int = BatchSampler(range(21 ) , batch_size=3 , drop_last=SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ :List[str] = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [0, 1, 2]],
]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ :Optional[Any] = BatchSampler(range(21 ) , batch_size=3 , drop_last=SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ :List[str] = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
# Check the shards when the dataset is not a round multiple of batch size but has a multiple of
# num_processes batch.
SCREAMING_SNAKE_CASE_ :Optional[int] = BatchSampler(range(22 ) , batch_size=3 , drop_last=SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ :Optional[int] = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 0, 1]],
]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ :Optional[Any] = BatchSampler(range(22 ) , batch_size=3 , drop_last=SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ :Dict = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
# Check the shards when the dataset is not a round multiple of batch size but and has not a multiple of
# num_processes batch.
SCREAMING_SNAKE_CASE_ :Tuple = BatchSampler(range(20 ) , batch_size=3 , drop_last=SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ :Optional[Any] = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 0]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [1, 2, 3]],
]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ :int = BatchSampler(range(20 ) , batch_size=3 , drop_last=SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ :Any = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
# Check the shards when the dataset is very small.
SCREAMING_SNAKE_CASE_ :Dict = BatchSampler(range(2 ) , batch_size=3 , drop_last=SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ :int = [[[0, 1, 0]], [[1, 0, 1]]]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ :Optional[int] = BatchSampler(range(2 ) , batch_size=3 , drop_last=SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ :List[Any] = [[], []]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
def _lowercase ( self : Tuple ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ :Optional[int] = BatchSampler(range(24 ) , batch_size=4 , drop_last=SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ :int = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]],
]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , split_batches=SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ :List[str] = BatchSampler(range(24 ) , batch_size=4 , drop_last=SCREAMING_SNAKE_CASE )
# Expected shouldn't change
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , split_batches=SCREAMING_SNAKE_CASE )
# Check the shards when the dataset is not a round multiple of batch size.
SCREAMING_SNAKE_CASE_ :Any = BatchSampler(range(22 ) , batch_size=4 , drop_last=SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ :Dict = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [0, 1]],
]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , split_batches=SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ :Optional[Any] = BatchSampler(range(22 ) , batch_size=4 , drop_last=SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ :Optional[int] = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , split_batches=SCREAMING_SNAKE_CASE )
# Check the shards when the dataset is not a round multiple of batch size or num_processes.
SCREAMING_SNAKE_CASE_ :Any = BatchSampler(range(21 ) , batch_size=4 , drop_last=SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ :Dict = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 0]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [1, 2]],
]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , split_batches=SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ :Union[str, Any] = BatchSampler(range(21 ) , batch_size=4 , drop_last=SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ :Optional[int] = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , split_batches=SCREAMING_SNAKE_CASE )
# Check the shards when the dataset is very small.
SCREAMING_SNAKE_CASE_ :Optional[int] = BatchSampler(range(2 ) , batch_size=4 , drop_last=SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ :Optional[Any] = [[[0, 1]], [[0, 1]]]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , split_batches=SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ :str = BatchSampler(range(2 ) , batch_size=4 , drop_last=SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ :List[str] = [[], []]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , split_batches=SCREAMING_SNAKE_CASE )
def _lowercase ( self : Tuple ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ :Optional[int] = BatchSampler(range(24 ) , batch_size=3 , drop_last=SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ :List[Any] = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]],
]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , even_batches=SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ :List[Any] = BatchSampler(range(24 ) , batch_size=3 , drop_last=SCREAMING_SNAKE_CASE )
# Expected shouldn't change
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , even_batches=SCREAMING_SNAKE_CASE )
# Check the shards when the dataset is a round multiple of batch size but not total batch size.
SCREAMING_SNAKE_CASE_ :str = BatchSampler(range(21 ) , batch_size=3 , drop_last=SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ :Tuple = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , even_batches=SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ :List[Any] = BatchSampler(range(21 ) , batch_size=3 , drop_last=SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ :Any = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , even_batches=SCREAMING_SNAKE_CASE )
# Check the shards when the dataset is not a round multiple of batch size but has a multiple of
# num_processes batch.
SCREAMING_SNAKE_CASE_ :Dict = BatchSampler(range(22 ) , batch_size=3 , drop_last=SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ :Any = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21]],
]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , even_batches=SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ :str = BatchSampler(range(22 ) , batch_size=3 , drop_last=SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ :List[str] = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , even_batches=SCREAMING_SNAKE_CASE )
# Check the shards when the dataset is not a round multiple of batch size but and has not a multiple of
# num_processes batch.
SCREAMING_SNAKE_CASE_ :Optional[Any] = BatchSampler(range(20 ) , batch_size=3 , drop_last=SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ :Optional[int] = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , even_batches=SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ :List[Any] = BatchSampler(range(20 ) , batch_size=3 , drop_last=SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ :Tuple = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , even_batches=SCREAMING_SNAKE_CASE )
# Check the shards when the dataset is very small.
SCREAMING_SNAKE_CASE_ :Optional[int] = BatchSampler(range(2 ) , batch_size=3 , drop_last=SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ :List[Any] = [[[0, 1]], []]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , even_batches=SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ :Dict = BatchSampler(range(2 ) , batch_size=3 , drop_last=SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ :Any = [[], []]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , even_batches=SCREAMING_SNAKE_CASE )
def _lowercase ( self : Union[str, Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ :Optional[Any] = BatchSampler(range(24 ) , batch_size=4 , drop_last=SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ :Optional[Any] = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]],
]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , split_batches=SCREAMING_SNAKE_CASE , even_batches=SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ :List[Any] = BatchSampler(range(24 ) , batch_size=4 , drop_last=SCREAMING_SNAKE_CASE )
# Expected shouldn't change
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , split_batches=SCREAMING_SNAKE_CASE , even_batches=SCREAMING_SNAKE_CASE )
# Check the shards when the dataset is not a round multiple of batch size.
SCREAMING_SNAKE_CASE_ :Tuple = BatchSampler(range(22 ) , batch_size=4 , drop_last=SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ :List[str] = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , split_batches=SCREAMING_SNAKE_CASE , even_batches=SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ :Union[str, Any] = BatchSampler(range(22 ) , batch_size=4 , drop_last=SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ :Optional[Any] = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , split_batches=SCREAMING_SNAKE_CASE , even_batches=SCREAMING_SNAKE_CASE )
# Check the shards when the dataset is not a round multiple of batch size or num_processes.
SCREAMING_SNAKE_CASE_ :Dict = BatchSampler(range(21 ) , batch_size=4 , drop_last=SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ :Dict = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , split_batches=SCREAMING_SNAKE_CASE , even_batches=SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ :int = BatchSampler(range(21 ) , batch_size=4 , drop_last=SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ :List[Any] = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , split_batches=SCREAMING_SNAKE_CASE , even_batches=SCREAMING_SNAKE_CASE )
# Check the shards when the dataset is very small.
SCREAMING_SNAKE_CASE_ :List[str] = BatchSampler(range(2 ) , batch_size=4 , drop_last=SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ :Tuple = [[[0, 1]], []]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , split_batches=SCREAMING_SNAKE_CASE , even_batches=SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ :str = BatchSampler(range(2 ) , batch_size=4 , drop_last=SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ :str = [[], []]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , split_batches=SCREAMING_SNAKE_CASE , even_batches=SCREAMING_SNAKE_CASE )
def _lowercase ( self : List[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ :Dict = [[0, 1, 2], [3, 4], [5, 6, 7, 8], [9, 10, 11], [12, 13]]
SCREAMING_SNAKE_CASE_ :int = [BatchSamplerShard(SCREAMING_SNAKE_CASE , 2 , SCREAMING_SNAKE_CASE , even_batches=SCREAMING_SNAKE_CASE ) for i in range(2 )]
self.assertEqual(len(batch_sampler_shards[0] ) , 3 )
self.assertEqual(len(batch_sampler_shards[1] ) , 2 )
self.assertListEqual(list(batch_sampler_shards[0] ) , [[0, 1, 2], [5, 6, 7, 8], [12, 13]] )
self.assertListEqual(list(batch_sampler_shards[1] ) , [[3, 4], [9, 10, 11]] )
def _lowercase ( self : Any , SCREAMING_SNAKE_CASE : List[str] , SCREAMING_SNAKE_CASE : Optional[int] , SCREAMING_SNAKE_CASE : Optional[int] , SCREAMING_SNAKE_CASE : int=False , SCREAMING_SNAKE_CASE : List[str]=2 , SCREAMING_SNAKE_CASE : Optional[Any]=False ):
"""simple docstring"""
random.seed(SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ :int = list(SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ :str = [
IterableDatasetShard(
SCREAMING_SNAKE_CASE , batch_size=SCREAMING_SNAKE_CASE , drop_last=SCREAMING_SNAKE_CASE , num_processes=SCREAMING_SNAKE_CASE , process_index=SCREAMING_SNAKE_CASE , split_batches=SCREAMING_SNAKE_CASE , )
for i in range(SCREAMING_SNAKE_CASE )
]
SCREAMING_SNAKE_CASE_ :Union[str, Any] = []
for iterable_dataset_shard in iterable_dataset_shards:
# Since our random iterable dataset will be... random... we need to use a seed to get reproducible results.
random.seed(SCREAMING_SNAKE_CASE )
iterable_dataset_lists.append(list(SCREAMING_SNAKE_CASE ) )
SCREAMING_SNAKE_CASE_ :Optional[int] = batch_size // num_processes if split_batches else batch_size
# All iterable dataset shard should have the same length, a round multiple of shard_batch_size
SCREAMING_SNAKE_CASE_ :Optional[Any] = iterable_dataset_lists[0]
for l in iterable_dataset_lists[1:]:
self.assertEqual(len(SCREAMING_SNAKE_CASE ) , len(SCREAMING_SNAKE_CASE ) )
self.assertTrue(len(SCREAMING_SNAKE_CASE ) % shard_batch_size == 0 )
SCREAMING_SNAKE_CASE_ :Union[str, Any] = []
for idx in range(0 , len(SCREAMING_SNAKE_CASE ) , SCREAMING_SNAKE_CASE ):
for l in iterable_dataset_lists:
observed += l[idx : idx + shard_batch_size]
if not drop_last:
while len(SCREAMING_SNAKE_CASE ) < len(SCREAMING_SNAKE_CASE ):
reference += reference
self.assertListEqual(SCREAMING_SNAKE_CASE , reference[: len(SCREAMING_SNAKE_CASE )] )
def _lowercase ( self : List[str] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ :List[str] = 42
SCREAMING_SNAKE_CASE_ :List[str] = RandomIterableDataset()
self.check_iterable_dataset_shards(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , batch_size=4 , drop_last=SCREAMING_SNAKE_CASE , split_batches=SCREAMING_SNAKE_CASE )
self.check_iterable_dataset_shards(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , batch_size=4 , drop_last=SCREAMING_SNAKE_CASE , split_batches=SCREAMING_SNAKE_CASE )
self.check_iterable_dataset_shards(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , batch_size=4 , drop_last=SCREAMING_SNAKE_CASE , split_batches=SCREAMING_SNAKE_CASE )
self.check_iterable_dataset_shards(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , batch_size=4 , drop_last=SCREAMING_SNAKE_CASE , split_batches=SCREAMING_SNAKE_CASE )
# Edge case with a very small dataset
SCREAMING_SNAKE_CASE_ :List[str] = RandomIterableDataset(max_length=2 )
self.check_iterable_dataset_shards(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , batch_size=4 , drop_last=SCREAMING_SNAKE_CASE , split_batches=SCREAMING_SNAKE_CASE )
self.check_iterable_dataset_shards(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , batch_size=4 , drop_last=SCREAMING_SNAKE_CASE , split_batches=SCREAMING_SNAKE_CASE )
self.check_iterable_dataset_shards(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , batch_size=4 , drop_last=SCREAMING_SNAKE_CASE , split_batches=SCREAMING_SNAKE_CASE )
self.check_iterable_dataset_shards(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , batch_size=4 , drop_last=SCREAMING_SNAKE_CASE , split_batches=SCREAMING_SNAKE_CASE )
    def test_skip_batch_sampler(self):
        batch_sampler = BatchSampler(range(16), batch_size=4, drop_last=False)
        new_batch_sampler = SkipBatchSampler(batch_sampler, 2)
        self.assertListEqual(list(new_batch_sampler), [[8, 9, 10, 11], [12, 13, 14, 15]])

    def test_skip_data_loader(self):
        dataloader = SkipDataLoader(list(range(16)), batch_size=4, skip_batches=2)
        self.assertListEqual([t.tolist() for t in dataloader], [[8, 9, 10, 11], [12, 13, 14, 15]])

    def test_skip_first_batches(self):
        dataloader = DataLoader(list(range(16)), batch_size=4)
        new_dataloader = skip_first_batches(dataloader, num_batches=2)
        self.assertListEqual([t.tolist() for t in new_dataloader], [[8, 9, 10, 11], [12, 13, 14, 15]])

    def test_end_of_dataloader(self):
        dataloader = DataLoaderShard(list(range(16)), batch_size=4)
        for idx, _ in enumerate(dataloader):
            self.assertEqual(dataloader.end_of_dataloader, idx == 3)

        # Test it also works on the second iteration
        for idx, _ in enumerate(dataloader):
            self.assertEqual(dataloader.end_of_dataloader, idx == 3)

    def test_end_of_dataloader_dispatcher(self):
        Accelerator()
        dataloader = DataLoaderDispatcher(range(16), batch_size=4)
        for idx, _ in enumerate(dataloader):
            self.assertEqual(dataloader.end_of_dataloader, idx == 3)

        # Test it also works on the second iteration
        for idx, _ in enumerate(dataloader):
            self.assertEqual(dataloader.end_of_dataloader, idx == 3)
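
# Illustrative sketch (not part of the test suite): the skip utilities tested
# above all agree on the same arithmetic -- skipping 2 batches of a 16-element
# dataset with batch_size=4 resumes at element 8.
def _skipped_batches(dataset_len=16, batch_size=4, skip_batches=2):
    first_kept = skip_batches * batch_size
    return [list(range(i, i + batch_size)) for i in range(first_kept, dataset_len, batch_size)]


assert _skipped_batches() == [[8, 9, 10, 11], [12, 13, 14, 15]]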
"""XLM-RoBERTa model configuration."""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""xlm-roberta-base""": """https://huggingface.co/xlm-roberta-base/resolve/main/config.json""",
"""xlm-roberta-large""": """https://huggingface.co/xlm-roberta-large/resolve/main/config.json""",
"""xlm-roberta-large-finetuned-conll02-dutch""": (
"""https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/config.json"""
),
"""xlm-roberta-large-finetuned-conll02-spanish""": (
"""https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/config.json"""
),
"""xlm-roberta-large-finetuned-conll03-english""": (
"""https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/config.json"""
),
"""xlm-roberta-large-finetuned-conll03-german""": (
"""https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/config.json"""
),
}
class XLMRobertaConfig(PretrainedConfig):
    model_type = "xlm-roberta"

    def __init__(
        self,
        vocab_size=30_522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class XLMRobertaOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
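
# Illustrative sketch, not part of the original file: instantiating the ONNX
# config above exposes the dynamic axes used during export. It assumes
# `OnnxConfig` accepts a model config positionally, as in recent transformers.
if __name__ == "__main__":
    onnx_config = XLMRobertaOnnxConfig(XLMRobertaConfig())
    # OrderedDict([('input_ids', {0: 'batch', 1: 'sequence'}),
    #              ('attention_mask', {0: 'batch', 1: 'sequence'})])
    print(onnx_config.inputs)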
"""Accelerate example showing how to compute metrics correctly on a distributed system (GLUE MRPC)."""
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to properly calculate the metrics on the
# validation dataset when in a distributed system, and builds off the
# `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    """
    Creates a set of `DataLoader`s for the glue dataset,
    using "bert-base-cased" as the tokenizer.
    """
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )

    return train_dataloader, eval_dataloader
# For testing only
if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
    from accelerate.test_utils.training import mocked_dataloaders

    get_dataloaders = mocked_dataloaders  # noqa: F811
def training_function(config, args):
    # For testing only
    if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
        config["num_epochs"] = 2
    # Initialize accelerator
    accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])

    metric = evaluate.load("glue", "mrpc")

    # If the batch size is too big we use gradient accumulation
    gradient_accumulation_steps = 1
    if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
        gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
        batch_size = MAX_GPU_BATCH_SIZE

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)

    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr)

    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer,
        num_warmup_steps=100,
        num_training_steps=(len(train_dataloader) * num_epochs) // gradient_accumulation_steps,
    )

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # Now we train the model
    for epoch in range(num_epochs):
        model.train()
        for step, batch in enumerate(train_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            outputs = model(**batch)
            loss = outputs.loss
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss)
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

        model.eval()
        samples_seen = 0
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather((predictions, batch["labels"]))
            # New Code #
            # First we check if it's a distributed system
            if accelerator.use_distributed:
                # Then see if we're on the last batch of our eval dataloader
                if step == len(eval_dataloader) - 1:
                    # Last batch needs to be truncated on distributed systems as it contains additional samples
                    predictions = predictions[: len(eval_dataloader.dataset) - samples_seen]
                    references = references[: len(eval_dataloader.dataset) - samples_seen]
                else:
                    # Otherwise we add the number of samples seen
                    samples_seen += references.shape[0]
            # All of this can be avoided if you use `Accelerator.gather_for_metrics` instead of `Accelerator.gather`:
            # accelerator.gather_for_metrics((predictions, batch["labels"]))
            metric.add_batch(
                predictions=predictions,
                references=references,
            )

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
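
# Illustrative sketch, separate from the script above: the gradient-accumulation
# arithmetic it uses. A requested batch size above MAX_GPU_BATCH_SIZE is split
# into micro-steps, and the loss is divided by the same factor.
def _split_batch_size(requested: int, max_per_step: int = 16):
    """Return (per_step_batch_size, gradient_accumulation_steps)."""
    if requested <= max_per_step:
        return requested, 1
    return max_per_step, requested // max_per_step


assert _split_batch_size(64) == (16, 4)  # 4 micro-batches of 16 -> effective batch of 64
assert _split_batch_size(16) == (16, 1)  # no accumulation needed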
"""ViViT model configuration."""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''google/vivit-b-16x2-kinetics400''': (
'''https://huggingface.co/google/vivit-b-16x2-kinetics400/resolve/main/config.json'''
),
# See all Vivit models at https://huggingface.co/models?filter=vivit
}
class VivitConfig(PretrainedConfig):
    model_type = "vivit"

    def __init__(
        self,
        image_size=224,
        num_frames=32,
        tubelet_size=[2, 16, 16],
        num_channels=3,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act="gelu_fast",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-06,
        qkv_bias=True,
        **kwargs,
    ):
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.num_frames = num_frames
        self.tubelet_size = tubelet_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        super().__init__(**kwargs)
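
# Illustrative sketch, not part of the original file: with the defaults above,
# the video is carved into 3D tubelets of 2 frames x 16 x 16 pixels, which
# determines the sequence length the transformer sees.
if __name__ == "__main__":
    config = VivitConfig()
    t, h, w = config.tubelet_size
    num_tubelets = (config.num_frames // t) * (config.image_size // h) * (config.image_size // w)
    assert num_tubelets == 16 * 14 * 14  # 3136 tokens (plus a CLS token inside the model)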
"""Tests for the Kandinsky inpainting pipeline."""
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyInpaintPipeline, KandinskyPriorPipeline, UNet2DConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class KandinskyInpaintPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyInpaintPipeline
    params = ["prompt", "image_embeds", "negative_image_embeds", "image", "mask_image"]
    batch_params = [
        "prompt",
        "negative_prompt",
        "image_embeds",
        "negative_image_embeds",
        "image",
        "mask_image",
    ]
    required_optional_params = [
        "generator",
        "height",
        "width",
        "latents",
        "guidance_scale",
        "negative_prompt",
        "num_inference_steps",
        "return_dict",
        "guidance_scale",
        "num_images_per_prompt",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False

    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100

    @property
    def dummy_tokenizer(self):
        tokenizer = XLMRobertaTokenizerFast.from_pretrained("YiYiXu/tiny-random-mclip-base")
        return tokenizer

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = MCLIPConfig(
            numDims=self.cross_attention_dim,
            transformerDimensions=self.text_embedder_hidden_size,
            hidden_size=self.text_embedder_hidden_size,
            intermediate_size=37,
            num_attention_heads=4,
            num_hidden_layers=5,
            vocab_size=1005,
        )

        text_encoder = MultilingualCLIP(config)
        text_encoder = text_encoder.eval()

        return text_encoder

    @property
    def dummy_unet(self):
        torch.manual_seed(0)

        model_kwargs = {
            "in_channels": 9,
            # Out channels is double in channels because predicts mean and variance
            "out_channels": 8,
            "addition_embed_type": "text_image",
            "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
            "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
            "mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
            "block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2),
            "layers_per_block": 1,
            "encoder_hid_dim": self.text_embedder_hidden_size,
            "encoder_hid_dim_type": "text_image_proj",
            "cross_attention_dim": self.cross_attention_dim,
            "attention_head_dim": 4,
            "resnet_time_scale_shift": "scale_shift",
            "class_embed_type": None,
        }

        model = UNet2DConditionModel(**model_kwargs)
        return model

    @property
    def dummy_movq_kwargs(self):
        return {
            "block_out_channels": [32, 64],
            "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
            "in_channels": 3,
            "latent_channels": 4,
            "layers_per_block": 1,
            "norm_num_groups": 8,
            "norm_type": "spatial",
            "num_vq_embeddings": 12,
            "out_channels": 3,
            "up_block_types": [
                "AttnUpDecoderBlock2D",
                "UpDecoderBlock2D",
            ],
            "vq_embed_dim": 4,
        }

    @property
    def dummy_movq(self):
        torch.manual_seed(0)
        model = VQModel(**self.dummy_movq_kwargs)
        return model
    def get_dummy_components(self):
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        unet = self.dummy_unet
        movq = self.dummy_movq

        scheduler = DDIMScheduler(
            num_train_timesteps=1000,
            beta_schedule="linear",
            beta_start=0.00085,
            beta_end=0.012,
            clip_sample=False,
            set_alpha_to_one=False,
            steps_offset=1,
            prediction_type="epsilon",
            thresholding=False,
        )

        components = {
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "unet": unet,
            "scheduler": scheduler,
            "movq": movq,
        }

        return components

    def get_dummy_inputs(self, device, seed=0):
        image_embeds = floats_tensor((1, self.cross_attention_dim), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.cross_attention_dim), rng=random.Random(seed + 1)).to(device)
        # create init_image
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((256, 256))
        # create mask
        mask = np.ones((64, 64), dtype=np.float32)
        mask[:32, :32] = 0

        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "horse",
            "image": init_image,
            "mask_image": mask,
            "image_embeds": image_embeds,
            "negative_image_embeds": negative_image_embeds,
            "generator": generator,
            "height": 64,
            "width": 64,
            "num_inference_steps": 2,
            "guidance_scale": 4.0,
            "output_type": "np",
        }
        return inputs

    def test_kandinsky_inpaint(self):
        device = "cpu"

        components = self.get_dummy_components()

        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)

        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.images

        image_from_tuple = pipe(
            **self.get_dummy_inputs(device),
            return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        print(f"image.shape {image.shape}")

        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array(
            [0.8326919, 0.73790467, 0.20918581, 0.9309612, 0.5511791, 0.43713328, 0.5513321, 0.49922934, 0.59497786]
        )

        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)
@slow
@require_torch_gpu
class KandinskyInpaintPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_kandinsky_inpaint(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinsky/kandinsky_inpaint_cat_with_hat_fp16.npy"
        )
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png"
        )
        mask = np.ones((768, 768), dtype=np.float32)
        mask[:250, 250:-250] = 0

        prompt = "a hat"

        pipe_prior = KandinskyPriorPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-1-prior", torch_dtype=torch.float16
        )
        pipe_prior.to(torch_device)

        pipeline = KandinskyInpaintPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-1-inpaint", torch_dtype=torch.float16
        )
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)

        generator = torch.Generator(device="cpu").manual_seed(0)
        image_emb, zero_image_emb = pipe_prior(
            prompt,
            generator=generator,
            num_inference_steps=5,
            negative_prompt="",
        ).to_tuple()

        output = pipeline(
            prompt,
            image=init_image,
            mask_image=mask,
            image_embeds=image_emb,
            negative_image_embeds=zero_image_emb,
            generator=generator,
            num_inference_steps=100,
            height=768,
            width=768,
            output_type="np",
        )

        image = output.images[0]

        assert image.shape == (768, 768, 3)

        assert_mean_pixel_difference(image, expected_image)
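
# Illustrative sketch, not part of the test suite: the inpainting masks built
# in get_dummy_inputs above are float arrays where 1.0 marks pixels to keep and
# 0.0 marks the region to repaint.
_example_mask = np.ones((64, 64), dtype=np.float32)
_example_mask[:32, :32] = 0  # repaint the top-left quadrant, keep the rest
assert _example_mask.sum() == 64 * 64 - 32 * 32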
"""Tests for the BLIP processor."""
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, BertTokenizer, BlipImageProcessor, BlipProcessor, PreTrainedTokenizerFast
@require_vision
class BlipProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        image_processor = BlipImageProcessor()
        tokenizer = BertTokenizer.from_pretrained("hf-internal-testing/tiny-random-BertModel")

        processor = BlipProcessor(image_processor, tokenizer)

        processor.save_pretrained(self.tmpdirname)

    def get_tokenizer(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).tokenizer

    def get_image_processor(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).image_processor

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Prepares a list of random PIL images."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_additional_features(self):
        processor = BlipProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = BlipProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, PreTrainedTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, BlipImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = BlipProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = BlipProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"

        encoded_processor = processor(text=input_str)

        encoded_tok = tokenizer(input_str, return_token_type_ids=False)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = BlipProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["pixel_values", "input_ids", "attention_mask"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = BlipProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = BlipProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        # For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask']
        self.assertListEqual(list(inputs.keys()), ["pixel_values", "input_ids", "attention_mask"])
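
# Illustrative usage sketch, reusing the tiny test checkpoint above: the
# processor bundles tokenization and image preprocessing into one call.
def _example_blip_processor_usage():
    processor = BlipProcessor(
        BlipImageProcessor(), BertTokenizer.from_pretrained("hf-internal-testing/tiny-random-BertModel")
    )
    image = Image.fromarray(np.zeros((30, 30, 3), dtype=np.uint8))
    inputs = processor(text="a photo", images=image, return_tensors="np")
    return sorted(inputs.keys())  # ['attention_mask', 'input_ids', 'pixel_values']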
from dataclasses import dataclass, field
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import pyarrow as pa
if TYPE_CHECKING:
from .features import FeatureType
@dataclass
class Translation:
    """`FeatureConnector` for translations with fixed languages per example."""

    languages: List[str]
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "dict"
    pa_type: ClassVar[Any] = None
    _type: str = field(default="Translation", init=False, repr=False)

    def __call__(self):
        return pa.struct({lang: pa.string() for lang in sorted(self.languages)})

    def flatten(self) -> Union["FeatureType", Dict[str, "FeatureType"]]:
        """Flatten the Translation feature into a dictionary."""
        from .features import Value

        return {k: Value("string") for k in sorted(self.languages)}


@dataclass
class TranslationVariableLanguages:
    """`FeatureConnector` for translations with variable languages per example."""

    languages: Optional[List] = None
    num_languages: Optional[int] = None
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "dict"
    pa_type: ClassVar[Any] = None
    _type: str = field(default="TranslationVariableLanguages", init=False, repr=False)

    def __post_init__(self):
        self.languages = sorted(set(self.languages)) if self.languages else None
        self.num_languages = len(self.languages) if self.languages else None

    def __call__(self):
        return pa.struct({"language": pa.list_(pa.string()), "translation": pa.list_(pa.string())})

    def encode_example(self, translation_dict):
        lang_set = set(self.languages)
        if self.languages and set(translation_dict) - lang_set:
            raise ValueError(
                f"Some languages in example ({', '.join(sorted(set(translation_dict) - lang_set))}) are not in valid set ({', '.join(lang_set)})."
            )

        # Convert dictionary into tuples, splitting out cases where there are
        # multiple translations for a single language.
        translation_tuples = []
        for lang, text in translation_dict.items():
            if isinstance(text, str):
                translation_tuples.append((lang, text))
            else:
                translation_tuples.extend([(lang, el) for el in text])

        # Ensure translations are in ascending order by language code.
        languages, translations = zip(*sorted(translation_tuples))

        return {"language": languages, "translation": translations}

    def flatten(self) -> Union["FeatureType", Dict[str, "FeatureType"]]:
        """Flatten the TranslationVariableLanguages feature into a dictionary."""
        from .features import Sequence, Value

        return {
            "language": Sequence(Value("string")),
            "translation": Sequence(Value("string")),
        }
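
# Illustrative sketch, not part of the original module: encode_example flattens
# a dict that may hold several translations per language into parallel,
# language-sorted lists.
# >>> feature = TranslationVariableLanguages(languages=["de", "en", "fr"])
# >>> feature.encode_example({"en": "the cat", "fr": ["le chat", "la chatte"], "de": "die katze"})
# {'language': ('de', 'en', 'fr', 'fr'),
#  'translation': ('die katze', 'the cat', 'la chatte', 'le chat')}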
"""Tests for the ONNX Stable Diffusion pipeline."""
import tempfile
import unittest
import numpy as np
from diffusers import (
DDIMScheduler,
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionPipeline,
PNDMScheduler,
)
from diffusers.utils.testing_utils import is_onnx_available, nightly, require_onnxruntime, require_torch_gpu
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class OnnxStableDiffusionPipelineFastTests(OnnxPipelineTesterMixin, unittest.TestCase):
    hub_checkpoint = "hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline"

    def get_dummy_inputs(self, seed=0):
        generator = np.random.RandomState(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs

    def test_pipeline_default_ddim(self):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.65072, 0.58492, 0.48219, 0.55521, 0.53180, 0.55939, 0.50697, 0.39800, 0.46455])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    def test_pipeline_pndm(self):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = PNDMScheduler.from_config(pipe.scheduler.config, skip_prk_steps=True)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.65863, 0.59425, 0.49326, 0.56313, 0.53875, 0.56627, 0.51065, 0.39777, 0.46330])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_pipeline_lms(self):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.53755, 0.60786, 0.47402, 0.49488, 0.51869, 0.49819, 0.47985, 0.38957, 0.44279])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_pipeline_euler(self):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.53755, 0.60786, 0.47402, 0.49488, 0.51869, 0.49819, 0.47985, 0.38957, 0.44279])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_pipeline_euler_ancestral(self):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.53817, 0.60812, 0.47384, 0.49530, 0.51894, 0.49814, 0.47984, 0.38958, 0.44271])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_pipeline_dpm_multistep(self):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.53895, 0.60808, 0.47933, 0.49608, 0.51886, 0.49950, 0.48053, 0.38957, 0.44200])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    def test_stable_diffusion_prompt_embeds(self):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        inputs["prompt"] = 3 * [inputs["prompt"]]

        # forward
        output = pipe(**inputs)
        image_slice_1 = output.images[0, -3:, -3:, -1]

        inputs = self.get_dummy_inputs()
        prompt = 3 * [inputs.pop("prompt")]

        text_inputs = pipe.tokenizer(
            prompt, padding="max_length", max_length=pipe.tokenizer.model_max_length, truncation=True, return_tensors="np"
        )
        text_inputs = text_inputs["input_ids"]

        prompt_embeds = pipe.text_encoder(input_ids=text_inputs.astype(np.int32))[0]

        inputs["prompt_embeds"] = prompt_embeds

        # forward
        output = pipe(**inputs)
        image_slice_2 = output.images[0, -3:, -3:, -1]

        assert np.abs(image_slice_1.flatten() - image_slice_2.flatten()).max() < 1e-4

    def test_stable_diffusion_negative_prompt_embeds(self):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        negative_prompt = 3 * ["this is a negative prompt"]
        inputs["negative_prompt"] = negative_prompt
        inputs["prompt"] = 3 * [inputs["prompt"]]

        # forward
        output = pipe(**inputs)
        image_slice_1 = output.images[0, -3:, -3:, -1]

        inputs = self.get_dummy_inputs()
        prompt = 3 * [inputs.pop("prompt")]

        embeds = []
        for p in [prompt, negative_prompt]:
            text_inputs = pipe.tokenizer(
                p, padding="max_length", max_length=pipe.tokenizer.model_max_length, truncation=True, return_tensors="np"
            )
            text_inputs = text_inputs["input_ids"]

            embeds.append(pipe.text_encoder(input_ids=text_inputs.astype(np.int32))[0])

        inputs["prompt_embeds"], inputs["negative_prompt_embeds"] = embeds

        # forward
        output = pipe(**inputs)
        image_slice_2 = output.images[0, -3:, -3:, -1]

        assert np.abs(image_slice_1.flatten() - image_slice_2.flatten()).max() < 1e-4
@nightly
@require_onnxruntime
@require_torch_gpu
class OnnxStableDiffusionPipelineIntegrationTests(unittest.TestCase):
    @property
    def gpu_provider(self):
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000",  # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )

    @property
    def gpu_options(self):
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options

    def test_inference_default_pndm(self):
        # using the PNDM scheduler by default
        sd_pipe = OnnxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4",
            revision="onnx",
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        np.random.seed(0)
        output = sd_pipe([prompt], guidance_scale=6.0, num_inference_steps=10, output_type="np")
        image = output.images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.0452, 0.0390, 0.0087, 0.0350, 0.0617, 0.0364, 0.0544, 0.0523, 0.0720])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_inference_ddim(self):
        ddim_scheduler = DDIMScheduler.from_pretrained(
            "runwayml/stable-diffusion-v1-5", subfolder="scheduler", revision="onnx"
        )
        sd_pipe = OnnxStableDiffusionPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5",
            revision="onnx",
            scheduler=ddim_scheduler,
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "open neural network exchange"
        generator = np.random.RandomState(0)
        output = sd_pipe([prompt], guidance_scale=7.5, num_inference_steps=10, generator=generator, output_type="np")
        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.2867, 0.1974, 0.1481, 0.7294, 0.7251, 0.6667, 0.4194, 0.5642, 0.6486])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_inference_k_lms(self):
        lms_scheduler = LMSDiscreteScheduler.from_pretrained(
            "runwayml/stable-diffusion-v1-5", subfolder="scheduler", revision="onnx"
        )
        sd_pipe = OnnxStableDiffusionPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5",
            revision="onnx",
            scheduler=lms_scheduler,
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "open neural network exchange"
        generator = np.random.RandomState(0)
        output = sd_pipe([prompt], guidance_scale=7.5, num_inference_steps=10, generator=generator, output_type="np")
        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.2306, 0.1959, 0.1593, 0.6549, 0.6394, 0.5408, 0.5065, 0.6010, 0.6161])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_intermediate_state(self):
        number_of_steps = 0

        def test_callback_fn(step: int, timestep: int, latents: np.ndarray) -> None:
            test_callback_fn.has_been_called = True
            nonlocal number_of_steps
            number_of_steps += 1
            if step == 0:
                assert latents.shape == (1, 4, 64, 64)
                latents_slice = latents[0, -3:, -3:, -1]
                expected_slice = np.array(
                    [-0.6772, -0.3835, -1.2456, 0.1905, -1.0974, 0.6967, -1.9353, 0.0178, 1.0167]
                )

                assert np.abs(latents_slice.flatten() - expected_slice).max() < 1e-3
            elif step == 5:
                assert latents.shape == (1, 4, 64, 64)
                latents_slice = latents[0, -3:, -3:, -1]
                expected_slice = np.array(
                    [-0.3351, 0.2241, -0.1837, -0.2325, -0.6577, 0.3393, -0.0241, 0.5899, 1.3875]
                )

                assert np.abs(latents_slice.flatten() - expected_slice).max() < 1e-3

        test_callback_fn.has_been_called = False

        pipe = OnnxStableDiffusionPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5",
            revision="onnx",
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "Andromeda galaxy in a bottle"

        generator = np.random.RandomState(0)
        pipe(
            prompt=prompt,
            num_inference_steps=5,
            guidance_scale=7.5,
            generator=generator,
            callback=test_callback_fn,
            callback_steps=1,
        )
        assert test_callback_fn.has_been_called
        assert number_of_steps == 6

    def test_stable_diffusion_no_safety_checker(self):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5",
            revision="onnx",
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        assert isinstance(pipe, OnnxStableDiffusionPipeline)
        assert pipe.safety_checker is None

        image = pipe("example prompt", num_inference_steps=2).images[0]
        assert image is not None

        # check that there's no error when saving a pipeline with one of the models being None
        with tempfile.TemporaryDirectory() as tmpdirname:
            pipe.save_pretrained(tmpdirname)
            pipe = OnnxStableDiffusionPipeline.from_pretrained(tmpdirname)

        # sanity check that the pipeline still works
        assert pipe.safety_checker is None
        image = pipe("example prompt", num_inference_steps=2).images[0]
        assert image is not None
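
# Illustrative sketch, not part of the test suite: the scheduler-swapping
# pattern exercised above. `from_config` rebuilds a scheduler from the current
# one's config, so the pipeline keeps its noise schedule while changing solver.
def _example_swap_scheduler():
    pipe = OnnxStableDiffusionPipeline.from_pretrained(
        "hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline", provider="CPUExecutionProvider"
    )
    pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config)
    return pipe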
"""Tests for the AutoFeatureExtractor auto class."""
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
    CONFIG_MAPPING,
    FEATURE_EXTRACTOR_MAPPING,
    AutoConfig,
    AutoFeatureExtractor,
    Wav2Vec2Config,
    Wav2Vec2FeatureExtractor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir
sys.path.append(str(Path(__file__).parent.parent.parent.parent / """utils"""))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR = get_tests_dir("fixtures")
SAMPLE_FEATURE_EXTRACTION_CONFIG = get_tests_dir("fixtures/dummy_feature_extractor_config.json")
SAMPLE_CONFIG = get_tests_dir("fixtures/dummy-config.json")
class AutoFeatureExtractorTest(unittest.TestCase):
    def setUp(self):
        transformers.dynamic_module_utils.TIME_OUT_REMOTE_CODE = 0

    def test_feature_extractor_from_model_shortcut(self):
        config = AutoFeatureExtractor.from_pretrained("facebook/wav2vec2-base-960h")
        self.assertIsInstance(config, Wav2Vec2FeatureExtractor)

    def test_feature_extractor_from_local_directory_from_key(self):
        config = AutoFeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR)
        self.assertIsInstance(config, Wav2Vec2FeatureExtractor)

    def test_feature_extractor_from_local_directory_from_config(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            model_config = Wav2Vec2Config()

            # remove feature_extractor_type to make sure config.json alone is enough to load feature processor locally
            config_dict = AutoFeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR).to_dict()

            config_dict.pop("feature_extractor_type")
            config = Wav2Vec2FeatureExtractor(**config_dict)

            # save in new folder
            model_config.save_pretrained(tmpdirname)
            config.save_pretrained(tmpdirname)

            config = AutoFeatureExtractor.from_pretrained(tmpdirname)

            # make sure private variable is not incorrectly saved
            dict_as_saved = json.loads(config.to_json_string())
            self.assertTrue("_processor_class" not in dict_as_saved)

        self.assertIsInstance(config, Wav2Vec2FeatureExtractor)

    def test_feature_extractor_from_local_file(self):
        config = AutoFeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG)
        self.assertIsInstance(config, Wav2Vec2FeatureExtractor)

    def test_repo_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, "bert-base is not a local folder and is not a valid model identifier"
        ):
            _ = AutoFeatureExtractor.from_pretrained("bert-base")

    def test_revision_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, r"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)"
        ):
            _ = AutoFeatureExtractor.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, revision="aaaaaa")

    def test_feature_extractor_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError,
            "hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.",
        ):
            _ = AutoFeatureExtractor.from_pretrained("hf-internal-testing/config-no-model")

    def test_from_pretrained_dynamic_feature_extractor(self):
        # If remote code is not set, we will time out when asking whether to load the model.
        with self.assertRaises(ValueError):
            feature_extractor = AutoFeatureExtractor.from_pretrained(
                "hf-internal-testing/test_dynamic_feature_extractor"
            )
        # If remote code is disabled, we can't load this config.
        with self.assertRaises(ValueError):
            feature_extractor = AutoFeatureExtractor.from_pretrained(
                "hf-internal-testing/test_dynamic_feature_extractor", trust_remote_code=False
            )

        feature_extractor = AutoFeatureExtractor.from_pretrained(
            "hf-internal-testing/test_dynamic_feature_extractor", trust_remote_code=True
        )
        self.assertEqual(feature_extractor.__class__.__name__, "NewFeatureExtractor")

        # Test feature extractor can be reloaded.
        with tempfile.TemporaryDirectory() as tmp_dir:
            feature_extractor.save_pretrained(tmp_dir)
            reloaded_feature_extractor = AutoFeatureExtractor.from_pretrained(tmp_dir, trust_remote_code=True)
        self.assertEqual(reloaded_feature_extractor.__class__.__name__, "NewFeatureExtractor")

    def test_new_feature_extractor_registration(self):
        try:
            AutoConfig.register("custom", CustomConfig)
            AutoFeatureExtractor.register(CustomConfig, CustomFeatureExtractor)
            # Trying to register something existing in the Transformers library will raise an error
            with self.assertRaises(ValueError):
                AutoFeatureExtractor.register(Wav2Vec2Config, Wav2Vec2FeatureExtractor)

            # Now that the config is registered, it can be used as any other config with the auto-API
            feature_extractor = CustomFeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR)
            with tempfile.TemporaryDirectory() as tmp_dir:
                feature_extractor.save_pretrained(tmp_dir)
                new_feature_extractor = AutoFeatureExtractor.from_pretrained(tmp_dir)
                self.assertIsInstance(new_feature_extractor, CustomFeatureExtractor)

        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
                del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]

    def test_from_pretrained_dynamic_feature_extractor_conflict(self):
        class NewFeatureExtractor(Wav2Vec2FeatureExtractor):
            is_local = True

        try:
            AutoConfig.register("custom", CustomConfig)
            AutoFeatureExtractor.register(CustomConfig, NewFeatureExtractor)
            # If remote code is not set, the default is to use local
            feature_extractor = AutoFeatureExtractor.from_pretrained(
                "hf-internal-testing/test_dynamic_feature_extractor"
            )
            self.assertEqual(feature_extractor.__class__.__name__, "NewFeatureExtractor")
            self.assertTrue(feature_extractor.is_local)

            # If remote code is disabled, we load the local one.
            feature_extractor = AutoFeatureExtractor.from_pretrained(
                "hf-internal-testing/test_dynamic_feature_extractor", trust_remote_code=False
            )
            self.assertEqual(feature_extractor.__class__.__name__, "NewFeatureExtractor")
            self.assertTrue(feature_extractor.is_local)

            # If remote is enabled, we load from the Hub
            feature_extractor = AutoFeatureExtractor.from_pretrained(
                "hf-internal-testing/test_dynamic_feature_extractor", trust_remote_code=True
            )
            self.assertEqual(feature_extractor.__class__.__name__, "NewFeatureExtractor")
            self.assertTrue(not hasattr(feature_extractor, "is_local"))

        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
                del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
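
# Illustrative sketch, not part of the test suite: the registration pattern the
# tests above exercise. After registering a (config, feature extractor) pair,
# the auto-API resolves the custom model type like any built-in one.
def _example_register_custom_feature_extractor():
    AutoConfig.register("custom", CustomConfig)
    AutoFeatureExtractor.register(CustomConfig, CustomFeatureExtractor)
    # AutoFeatureExtractor.from_pretrained(...) now returns CustomFeatureExtractor
    # for any checkpoint whose config has model_type "custom".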
"""Video classification pipeline."""
from io import BytesIO
from typing import List, Union
import requests
from ..utils import add_end_docstrings, is_decord_available, is_torch_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_decord_available():
import numpy as np
from decord import VideoReader
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING
logger = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS)
class VideoClassificationPipeline(Pipeline):
    """
    Video classification pipeline using any `AutoModelForVideoClassification`. This pipeline predicts the class of a
    video.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        requires_backends(self, "decord")
        self.check_model_type(MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING)

    def _sanitize_parameters(self, top_k=None, num_frames=None, frame_sampling_rate=None):
        preprocess_params = {}
        if frame_sampling_rate is not None:
            preprocess_params["frame_sampling_rate"] = frame_sampling_rate
        if num_frames is not None:
            preprocess_params["num_frames"] = num_frames

        postprocess_params = {}
        if top_k is not None:
            postprocess_params["top_k"] = top_k
        return preprocess_params, {}, postprocess_params

    def __call__(self, videos: Union[str, List[str]], **kwargs):
        return super().__call__(videos, **kwargs)

    def preprocess(self, video, num_frames=None, frame_sampling_rate=1):
        if num_frames is None:
            num_frames = self.model.config.num_frames

        if video.startswith("http://") or video.startswith("https://"):
            video = BytesIO(requests.get(video).content)

        videoreader = VideoReader(video)
        videoreader.seek(0)

        start_idx = 0
        end_idx = num_frames * frame_sampling_rate - 1
        indices = np.linspace(start_idx, end_idx, num=num_frames, dtype=np.int64)

        video = videoreader.get_batch(indices).asnumpy()
        video = list(video)

        model_inputs = self.image_processor(video, return_tensors=self.framework)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs, top_k=5):
        if top_k > self.model.config.num_labels:
            top_k = self.model.config.num_labels

        if self.framework == "pt":
            probs = model_outputs.logits.softmax(-1)[0]
            scores, ids = probs.topk(top_k)
        else:
            raise ValueError(f"Unsupported framework: {self.framework}")

        scores = scores.tolist()
        ids = ids.tolist()
        return [{"score": score, "label": self.model.config.id2label[_id]} for score, _id in zip(scores, ids)]
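
# Illustrative sketch, not part of the pipeline module: the frame indices that
# preprocess() samples. With num_frames=8 and frame_sampling_rate=4, frames
# 0..31 are covered evenly.
# >>> np.linspace(0, 8 * 4 - 1, num=8, dtype=np.int64).tolist()
# [0, 4, 8, 13, 17, 22, 26, 31]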
import datetime
import platform
import subprocess
from typing import Optional, Tuple, Union
import numpy as np
def ffmpeg_read(bpayload: bytes, sampling_rate: int) -> np.array:
    """
    Helper function to read an audio file through ffmpeg.
    """
    ar = f"{sampling_rate}"
    ac = "1"
    format_for_conversion = "f32le"
    ffmpeg_command = [
        "ffmpeg",
        "-i",
        "pipe:0",
        "-ac",
        ac,
        "-ar",
        ar,
        "-f",
        format_for_conversion,
        "-hide_banner",
        "-loglevel",
        "quiet",
        "pipe:1",
    ]

    try:
        with subprocess.Popen(ffmpeg_command, stdin=subprocess.PIPE, stdout=subprocess.PIPE) as ffmpeg_process:
            output_stream = ffmpeg_process.communicate(bpayload)
    except FileNotFoundError as error:
        raise ValueError("ffmpeg was not found but is required to load audio files from filename") from error
    out_bytes = output_stream[0]
    audio = np.frombuffer(out_bytes, np.float32)
    if audio.shape[0] == 0:
        raise ValueError("Malformed soundfile")
    return audio
def ffmpeg_microphone(
    sampling_rate: int,
    chunk_length_s: float,
    format_for_conversion: str = "f32le",
):
    """
    Helper function to read raw microphone data.
    """
    ar = f"{sampling_rate}"
    ac = "1"
    if format_for_conversion == "s16le":
        size_of_sample = 2
    elif format_for_conversion == "f32le":
        size_of_sample = 4
    else:
        raise ValueError(f"Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`")

    system = platform.system()
    if system == "Linux":
        format_ = "alsa"
        input_ = "default"
    elif system == "Darwin":
        format_ = "avfoundation"
        input_ = ":0"
    elif system == "Windows":
        format_ = "dshow"
        input_ = "default"

    ffmpeg_command = [
        "ffmpeg",
        "-f",
        format_,
        "-i",
        input_,
        "-ac",
        ac,
        "-ar",
        ar,
        "-f",
        format_for_conversion,
        "-fflags",
        "nobuffer",
        "-hide_banner",
        "-loglevel",
        "quiet",
        "pipe:1",
    ]
    chunk_len = int(round(sampling_rate * chunk_length_s)) * size_of_sample
    iterator = _ffmpeg_stream(ffmpeg_command, chunk_len)
    for item in iterator:
        yield item
def ffmpeg_microphone_live(
    sampling_rate: int,
    chunk_length_s: float,
    stream_chunk_s: Optional[int] = None,
    stride_length_s: Optional[Union[Tuple[float, float], float]] = None,
    format_for_conversion: str = "f32le",
):
    """
    Helper function to read audio from the microphone. It will output `partial` overlapping chunks with a stride.
    """
    if stream_chunk_s is not None:
        chunk_s = stream_chunk_s
    else:
        chunk_s = chunk_length_s

    microphone = ffmpeg_microphone(sampling_rate, chunk_s, format_for_conversion=format_for_conversion)
    if format_for_conversion == "s16le":
        dtype = np.int16
        size_of_sample = 2
    elif format_for_conversion == "f32le":
        dtype = np.float32
        size_of_sample = 4
    else:
        raise ValueError(f"Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`")

    if stride_length_s is None:
        stride_length_s = chunk_length_s / 6
    chunk_len = int(round(sampling_rate * chunk_length_s)) * size_of_sample
    if isinstance(stride_length_s, (int, float)):
        stride_length_s = [stride_length_s, stride_length_s]

    stride_left = int(round(sampling_rate * stride_length_s[0])) * size_of_sample
    stride_right = int(round(sampling_rate * stride_length_s[1])) * size_of_sample
    audio_time = datetime.datetime.now()
    delta = datetime.timedelta(seconds=chunk_s)
    for item in chunk_bytes_iter(microphone, chunk_len, stride=(stride_left, stride_right), stream=True):
        # Put everything back in numpy scale
        item["raw"] = np.frombuffer(item["raw"], dtype=dtype)
        item["stride"] = (
            item["stride"][0] // size_of_sample,
            item["stride"][1] // size_of_sample,
        )
        item["sampling_rate"] = sampling_rate
        audio_time += delta
        if datetime.datetime.now() > audio_time + 10 * delta:
            # We're late !! SKIP
            continue
        yield item
def chunk_bytes_iter(iterator, chunk_len: int, stride: Tuple[int, int], stream: bool = False):
    """
    Read raw bytes from `iterator` and yield chunks of length `chunk_len` with the given
    (left, right) `stride` overlap. In streaming mode, partial chunks are yielded as soon
    as possible, flagged with `"partial": True`.
    """
    acc = b""
    stride_left, stride_right = stride
    if stride_left + stride_right >= chunk_len:
        raise ValueError(
            f"Stride needs to be strictly smaller than chunk_len: ({stride_left}, {stride_right}) vs {chunk_len}"
        )
    _stride_left = 0
    for raw in iterator:
        acc += raw
        if stream and len(acc) < chunk_len:
            stride = (_stride_left, 0)
            yield {"raw": acc[:chunk_len], "stride": stride, "partial": True}
        else:
            while len(acc) >= chunk_len:
                # We are flushing the accumulator
                stride = (_stride_left, stride_right)
                item = {"raw": acc[:chunk_len], "stride": stride}
                if stream:
                    item["partial"] = False
                yield item
                _stride_left = stride_left
                acc = acc[chunk_len - stride_left - stride_right :]
    # Last chunk
    if len(acc) > stride_left:
        item = {"raw": acc, "stride": (_stride_left, 0)}
        if stream:
            item["partial"] = False
        yield item
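
# --- Illustrative demo (not in the original module): how chunk_bytes_iter
# windows a byte stream; the fake 10-byte packets and sizes are assumptions. ---
def _demo_chunk_bytes_iter() -> None:
    def fake_stream():
        yield from (b"0123456789", b"abcdefghij", b"klmnopqrst")

    # With chunk_len=8 and stride=(2, 2), consecutive chunks overlap by 4 bytes.
    for piece in chunk_bytes_iter(fake_stream(), chunk_len=8, stride=(2, 2)):
        print(piece["stride"], piece["raw"])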
def _ffmpeg_stream(ffmpeg_command, buflen: int):
    """Internal helper: generate raw byte buffers from a running ffmpeg process."""
    bufsize = 2**24  # 16Mo
    try:
        with subprocess.Popen(ffmpeg_command, stdout=subprocess.PIPE, bufsize=bufsize) as ffmpeg_process:
            while True:
                raw = ffmpeg_process.stdout.read(buflen)
                if raw == b"":
                    break
                yield raw
    except FileNotFoundError as error:
        raise ValueError("ffmpeg was not found but is required to stream audio files from filename") from error
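
# --- Usage sketch (assumption: ffmpeg is on PATH and "sample.flac" exists) ---
if __name__ == "__main__":
    with open("sample.flac", "rb") as f:
        waveform = ffmpeg_read(f.read(), sampling_rate=16_000)
    print(waveform.dtype, waveform.shape)  # float32, (num_samples,)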
| 101
|
'''simple docstring'''
import importlib
import torch
import yaml
from omegaconf import OmegaConf
from taming.models.vqgan import VQModel
def load_config(config_path, display=False):
    config = OmegaConf.load(config_path)
    if display:
        print(yaml.dump(OmegaConf.to_container(config)))
    return config


def load_vqgan(device, conf_path=None, ckpt_path=None):
    if conf_path is None:
        conf_path = "./model_checkpoints/vqgan_only.yaml"
    config = load_config(conf_path, display=False)
    model = VQModel(**config.model.params)
    if ckpt_path is None:
        ckpt_path = "./model_checkpoints/vqgan_only.pt"
    sd = torch.load(ckpt_path, map_location=device)
    if ".ckpt" in ckpt_path:
        sd = sd["state_dict"]
    model.load_state_dict(sd, strict=False)
    model.to(device)
    del sd
    return model


def reconstruct_with_vqgan(x, model):
    z, _, _ = model.encode(x)
    print(f"VQGAN --- {model.__class__.__name__}: latent shape: {z.shape[2:]}")
    xrec = model.decode(z)
    return xrec


def get_obj_from_str(string, reload=False):
    module, cls = string.rsplit(".", 1)
    if reload:
        module_imp = importlib.import_module(module)
        importlib.reload(module_imp)
    return getattr(importlib.import_module(module, package=None), cls)


def instantiate_from_config(config):
    if "target" not in config:
        raise KeyError("Expected key `target` to instantiate.")
    return get_obj_from_str(config["target"])(**config.get("params", {}))


def load_model_from_config(config, sd, gpu=True, eval_mode=True):
    model = instantiate_from_config(config)
    if sd is not None:
        model.load_state_dict(sd)
    if gpu:
        model.cuda()
    if eval_mode:
        model.eval()
    return {"model": model}


def load_model(config, ckpt, gpu, eval_mode):
    # load the specified checkpoint
    if ckpt:
        pl_sd = torch.load(ckpt, map_location="cpu")
        global_step = pl_sd["global_step"]
        print(f"loaded model from global step {global_step}.")
    else:
        pl_sd = {"state_dict": None}
        global_step = None
    model = load_model_from_config(config.model, pl_sd["state_dict"], gpu=gpu, eval_mode=eval_mode)["model"]
    return model, global_step
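
# --- Usage sketch (assumes the default checkpoint paths above exist and the
# `taming` package is installed) ---
if __name__ == "__main__":
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    vqgan = load_vqgan(device)
    dummy_batch = torch.randn(1, 3, 256, 256, device=device)  # stand-in for an image
    reconstruction = reconstruct_with_vqgan(dummy_batch, vqgan)
    print(reconstruction.shape)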
| 274
| 0
|
"""simple docstring"""
def gcd(a: int, b: int) -> int:
    """Greatest common divisor via the iterative Euclidean algorithm."""
    while a != 0:
        a, b = b % a, a
    return b


def find_mod_inverse(a: int, m: int) -> int:
    """Modular inverse of `a` modulo `m` via the extended Euclidean algorithm."""
    if gcd(a, m) != 1:
        msg = f"mod inverse of {a!r} and {m!r} does not exist"
        raise ValueError(msg)
    u1, u2, u3 = 1, 0, a
    v1, v2, v3 = 0, 1, m
    while v3 != 0:
        q = u3 // v3
        v1, v2, v3, u1, u2, u3 = (u1 - q * v1), (u2 - q * v2), (u3 - q * v3), v1, v2, v3
    return u1 % m
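
# --- Usage sketch (not in the original file): verify (a * a^-1) % m == 1 ---
if __name__ == "__main__":
    inverse = find_mod_inverse(7, 26)
    print(inverse, (7 * inverse) % 26)  # 15 1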
| 404
|
"""simple docstring"""
from typing import List, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "huggingface/time-series-transformer-tourism-monthly": (
        "https://huggingface.co/huggingface/time-series-transformer-tourism-monthly/resolve/main/config.json"
    ),
    # See all TimeSeriesTransformer models at https://huggingface.co/models?filter=time_series_transformer
}


class TimeSeriesTransformerConfig(PretrainedConfig):
    model_type = "time_series_transformer"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
        "num_hidden_layers": "encoder_layers",
    }

    def __init__(
        self,
        prediction_length: Optional[int] = None,
        context_length: Optional[int] = None,
        distribution_output: str = "student_t",
        loss: str = "nll",
        input_size: int = 1,
        lags_sequence: List[int] = [1, 2, 3, 4, 5, 6, 7],
        scaling: Optional[Union[str, bool]] = "mean",
        num_dynamic_real_features: int = 0,
        num_static_categorical_features: int = 0,
        num_static_real_features: int = 0,
        num_time_features: int = 0,
        cardinality: Optional[List[int]] = None,
        embedding_dimension: Optional[List[int]] = None,
        encoder_ffn_dim: int = 32,
        decoder_ffn_dim: int = 32,
        encoder_attention_heads: int = 2,
        decoder_attention_heads: int = 2,
        encoder_layers: int = 2,
        decoder_layers: int = 2,
        is_encoder_decoder: bool = True,
        activation_function: str = "gelu",
        d_model: int = 64,
        dropout: float = 0.1,
        encoder_layerdrop: float = 0.1,
        decoder_layerdrop: float = 0.1,
        attention_dropout: float = 0.1,
        activation_dropout: float = 0.1,
        num_parallel_samples: int = 100,
        init_std: float = 0.02,
        use_cache=True,
        **kwargs,
    ):
        # time series specific configuration
        self.prediction_length = prediction_length
        self.context_length = context_length or prediction_length
        self.distribution_output = distribution_output
        self.loss = loss
        self.input_size = input_size
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence
        self.scaling = scaling
        self.num_dynamic_real_features = num_dynamic_real_features
        self.num_static_real_features = num_static_real_features
        self.num_static_categorical_features = num_static_categorical_features
        if cardinality and num_static_categorical_features > 0:
            if len(cardinality) != num_static_categorical_features:
                raise ValueError(
                    "The cardinality should be a list of the same length as `num_static_categorical_features`"
                )
            self.cardinality = cardinality
        else:
            self.cardinality = [0]
        if embedding_dimension and num_static_categorical_features > 0:
            if len(embedding_dimension) != num_static_categorical_features:
                raise ValueError(
                    "The embedding dimension should be a list of the same length as `num_static_categorical_features`"
                )
            self.embedding_dimension = embedding_dimension
        else:
            self.embedding_dimension = [min(50, (cat + 1) // 2) for cat in self.cardinality]
        self.num_parallel_samples = num_parallel_samples

        # Transformer architecture configuration
        self.feature_size = input_size * len(lags_sequence) + self._number_of_features
        self.d_model = d_model
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_attention_heads = decoder_attention_heads
        self.encoder_ffn_dim = encoder_ffn_dim
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.activation_function = activation_function
        self.init_std = init_std
        self.use_cache = use_cache
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def _number_of_features(self) -> int:
        return (
            sum(self.embedding_dimension)
            + self.num_dynamic_real_features
            + self.num_time_features
            + self.num_static_real_features
            + self.input_size * 2  # the log1p(abs(loc)) and log(scale) features
        )
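
# --- Usage sketch (illustrative hyperparameters, not tuned) ---
if __name__ == "__main__":
    config = TimeSeriesTransformerConfig(
        prediction_length=24,
        context_length=48,
        num_time_features=2,
        num_static_categorical_features=1,
        cardinality=[366],
    )
    # feature_size = input_size * len(lags_sequence) + _number_of_features
    print(config.feature_size, config.embedding_dimension)  # 61 [50]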
| 404
| 1
|
"""simple docstring"""
import sacrebleu as scb
from packaging import version
from sacrebleu import CHRF
import datasets
__SCREAMING_SNAKE_CASE ="\\n@inproceedings{popovic-2015-chrf,\n title = \"chr{F}: character n-gram {F}-score for automatic {MT} evaluation\",\n author = \"Popovi{\'c}, Maja\",\n booktitle = \"Proceedings of the Tenth Workshop on Statistical Machine Translation\",\n month = sep,\n year = \"2015\",\n address = \"Lisbon, Portugal\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/W15-3049\",\n doi = \"10.18653/v1/W15-3049\",\n pages = \"392--395\",\n}\n@inproceedings{popovic-2017-chrf,\n title = \"chr{F}++: words helping character n-grams\",\n author = \"Popovi{\'c}, Maja\",\n booktitle = \"Proceedings of the Second Conference on Machine Translation\",\n month = sep,\n year = \"2017\",\n address = \"Copenhagen, Denmark\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/W17-4770\",\n doi = \"10.18653/v1/W17-4770\",\n pages = \"612--618\",\n}\n@inproceedings{post-2018-call,\n title = \"A Call for Clarity in Reporting {BLEU} Scores\",\n author = \"Post, Matt\",\n booktitle = \"Proceedings of the Third Conference on Machine Translation: Research Papers\",\n month = oct,\n year = \"2018\",\n address = \"Belgium, Brussels\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://www.aclweb.org/anthology/W18-6319\",\n pages = \"186--191\",\n}\n"
__SCREAMING_SNAKE_CASE ="\\nChrF and ChrF++ are two MT evaluation metrics. They both use the F-score statistic for character n-gram matches,\nand ChrF++ adds word n-grams as well which correlates more strongly with direct assessment. We use the implementation\nthat is already present in sacrebleu.\n\nThe implementation here is slightly different from sacrebleu in terms of the required input format. The length of\nthe references and hypotheses lists need to be the same, so you may need to transpose your references compared to\nsacrebleu's required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534\n\nSee the README.md file at https://github.com/mjpost/sacreBLEU#chrf--chrf for more information.\n"
__SCREAMING_SNAKE_CASE ="\nProduces ChrF(++) scores for hypotheses given reference translations.\n\nArgs:\n predictions (list of str): The predicted sentences.\n references (list of list of str): The references. There should be one reference sub-list for each prediction sentence.\n char_order (int): Character n-gram order. Defaults to `6`.\n word_order (int): Word n-gram order. If equals to `2`, the metric is referred to as chrF++. Defaults to `0`.\n beta (int): Determine the importance of recall w.r.t precision. Defaults to `2`.\n lowercase (bool): if `True`, enables case-insensitivity. Defaults to `False`.\n whitespace (bool): If `True`, include whitespaces when extracting character n-grams.\n eps_smoothing (bool): If `True`, applies epsilon smoothing similar\n to reference chrF++.py, NLTK and Moses implementations. If `False`,\n it takes into account effective match order similar to sacreBLEU < 2.0.0. Defaults to `False`.\n\nReturns:\n 'score' (float): The chrF (chrF++) score,\n 'char_order' (int): The character n-gram order,\n 'word_order' (int): The word n-gram order. If equals to 2, the metric is referred to as chrF++,\n 'beta' (int): Determine the importance of recall w.r.t precision\n\nExamples:\n Example 1--a simple example of calculating chrF:\n >>> prediction = [\"The relationship between cats and dogs is not exactly friendly.\", \"a good bookshop is just a genteel black hole that knows how to read.\"]\n >>> reference = [[\"The relationship between dogs and cats is not exactly friendly.\"], [\"A good bookshop is just a genteel Black Hole that knows how to read.\"]]\n >>> chrf = datasets.load_metric(\"chrf\")\n >>> results = chrf.compute(predictions=prediction, references=reference)\n >>> print(results)\n {'score': 84.64214891738334, 'char_order': 6, 'word_order': 0, 'beta': 2}\n\n Example 2--the same example, but with the argument word_order=2, to calculate chrF++ instead of chrF:\n >>> prediction = [\"The relationship between cats and dogs is not exactly friendly.\", \"a good bookshop is just a genteel black hole that knows how to read.\"]\n >>> reference = [[\"The relationship between dogs and cats is not exactly friendly.\"], [\"A good bookshop is just a genteel Black Hole that knows how to read.\"]]\n >>> chrf = datasets.load_metric(\"chrf\")\n >>> results = chrf.compute(predictions=prediction,\n ... references=reference,\n ... word_order=2)\n >>> print(results)\n {'score': 82.87263732906315, 'char_order': 6, 'word_order': 2, 'beta': 2}\n\n Example 3--the same chrF++ example as above, but with `lowercase=True` to normalize all case:\n >>> prediction = [\"The relationship between cats and dogs is not exactly friendly.\", \"a good bookshop is just a genteel black hole that knows how to read.\"]\n >>> reference = [[\"The relationship between dogs and cats is not exactly friendly.\"], [\"A good bookshop is just a genteel Black Hole that knows how to read.\"]]\n >>> chrf = datasets.load_metric(\"chrf\")\n >>> results = chrf.compute(predictions=prediction,\n ... references=reference,\n ... word_order=2,\n ... lowercase=True)\n >>> print(results)\n {'score': 92.12853119829202, 'char_order': 6, 'word_order': 2, 'beta': 2}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class ChrF(datasets.Metric):
    def _info(self):
        if version.parse(scb.__version__) < version.parse("1.4.12"):
            raise ImportWarning(
                "To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn't match this condition.\n"
                'You can install it with `pip install "sacrebleu>=1.4.12"`.'
            )
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            homepage="https://github.com/mjpost/sacreBLEU#chrf--chrf",
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Sequence(datasets.Value("string", id="sequence"), id="references"),
                }
            ),
            codebase_urls=["https://github.com/mjpost/sacreBLEU#chrf--chrf"],
            reference_urls=[
                "https://github.com/m-popovic/chrF",
            ],
        )

    def _compute(
        self,
        predictions,
        references,
        char_order: int = CHRF.CHAR_ORDER,
        word_order: int = CHRF.WORD_ORDER,
        beta: int = CHRF.BETA,
        lowercase: bool = False,
        whitespace: bool = False,
        eps_smoothing: bool = False,
    ):
        references_per_prediction = len(references[0])
        if any(len(refs) != references_per_prediction for refs in references):
            raise ValueError("Sacrebleu requires the same number of references for each prediction")
        transformed_references = [[refs[i] for refs in references] for i in range(references_per_prediction)]
        sb_chrf = CHRF(char_order, word_order, beta, lowercase, whitespace, eps_smoothing)
        output = sb_chrf.corpus_score(predictions, transformed_references)
        return {
            "score": output.score,
            "char_order": output.char_order,
            "word_order": output.word_order,
            "beta": output.beta,
        }
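
# --- Sketch (not part of the metric): this metric wants one reference sub-list
# per prediction, i.e. the transpose of sacrebleu's per-reference-set layout. ---
if __name__ == "__main__":
    sacrebleu_style = [["ref A1", "ref B1"], ["ref A2", "ref B2"]]  # one list per ref set
    per_prediction = [list(refs) for refs in zip(*sacrebleu_style)]
    print(per_prediction)  # [['ref A1', 'ref A2'], ['ref B1', 'ref B2']]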
| 425
|
"""simple docstring"""
import os
import shutil
import sys
import tempfile
import unittest
from pathlib import Path
import pytest
import transformers
from transformers import (
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
AutoTokenizer,
BertConfig,
BertTokenizer,
BertTokenizerFast,
CTRLTokenizer,
GPTaTokenizer,
GPTaTokenizerFast,
PreTrainedTokenizerFast,
RobertaTokenizer,
RobertaTokenizerFast,
is_tokenizers_available,
)
from transformers.models.auto.configuration_auto import CONFIG_MAPPING, AutoConfig
from transformers.models.auto.tokenization_auto import (
TOKENIZER_MAPPING,
get_tokenizer_config,
tokenizer_class_from_name,
)
from transformers.models.roberta.configuration_roberta import RobertaConfig
from transformers.testing_utils import (
DUMMY_DIFF_TOKENIZER_IDENTIFIER,
DUMMY_UNKNOWN_IDENTIFIER,
SMALL_MODEL_IDENTIFIER,
RequestCounter,
require_tokenizers,
slow,
)
sys.path.append(str(Path(__file__).parent.parent.parent.parent / "utils"))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_tokenization import CustomTokenizer # noqa E402
if is_tokenizers_available():
from test_module.custom_tokenization_fast import CustomTokenizerFast
class UpperCamelCase ( unittest.TestCase ):
    def setUp(self):
        transformers.dynamic_module_utils.TIME_OUT_REMOTE_CODE = 0
@slow
def _UpperCAmelCase ( self ) -> List[str]:
'''simple docstring'''
for model_name in (x for x in BERT_PRETRAINED_CONFIG_ARCHIVE_MAP.keys() if "japanese" not in x):
lowercase_ : Tuple = AutoTokenizer.from_pretrained(__UpperCamelCase )
self.assertIsNotNone(__UpperCamelCase )
self.assertIsInstance(__UpperCamelCase ,(BertTokenizer, BertTokenizerFast) )
self.assertGreater(len(__UpperCamelCase ) ,0 )
for model_name in GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP.keys():
lowercase_ : List[Any] = AutoTokenizer.from_pretrained(__UpperCamelCase )
self.assertIsNotNone(__UpperCamelCase )
self.assertIsInstance(__UpperCamelCase ,(GPTaTokenizer, GPTaTokenizerFast) )
self.assertGreater(len(__UpperCamelCase ) ,0 )
def _UpperCAmelCase ( self ) -> Union[str, Any]:
'''simple docstring'''
lowercase_ : Tuple = AutoTokenizer.from_pretrained(__UpperCamelCase )
self.assertIsInstance(__UpperCamelCase ,(BertTokenizer, BertTokenizerFast) )
self.assertEqual(tokenizer.vocab_size ,12 )
def _UpperCAmelCase ( self ) -> Tuple:
'''simple docstring'''
lowercase_ : Dict = AutoTokenizer.from_pretrained(__UpperCamelCase )
self.assertIsInstance(__UpperCamelCase ,(RobertaTokenizer, RobertaTokenizerFast) )
self.assertEqual(tokenizer.vocab_size ,20 )
def _UpperCAmelCase ( self ) -> str:
'''simple docstring'''
lowercase_ : str = AutoConfig.from_pretrained(__UpperCamelCase )
self.assertIsInstance(__UpperCamelCase ,__UpperCamelCase )
# Check that tokenizer_type ≠ model_type
lowercase_ : List[str] = AutoTokenizer.from_pretrained(__UpperCamelCase ,config=__UpperCamelCase )
self.assertIsInstance(__UpperCamelCase ,(BertTokenizer, BertTokenizerFast) )
self.assertEqual(tokenizer.vocab_size ,12 )
def _UpperCAmelCase ( self ) -> Any:
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy('./tests/fixtures/vocab.txt' ,os.path.join(__UpperCamelCase ,'vocab.txt' ) )
lowercase_ : List[Any] = AutoTokenizer.from_pretrained(__UpperCamelCase ,tokenizer_type='bert' ,use_fast=__UpperCamelCase )
self.assertIsInstance(__UpperCamelCase ,__UpperCamelCase )
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy('./tests/fixtures/vocab.json' ,os.path.join(__UpperCamelCase ,'vocab.json' ) )
shutil.copy('./tests/fixtures/merges.txt' ,os.path.join(__UpperCamelCase ,'merges.txt' ) )
lowercase_ : Optional[int] = AutoTokenizer.from_pretrained(__UpperCamelCase ,tokenizer_type='gpt2' ,use_fast=__UpperCamelCase )
self.assertIsInstance(__UpperCamelCase ,__UpperCamelCase )
@require_tokenizers
def _UpperCAmelCase ( self ) -> Optional[int]:
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy('./tests/fixtures/vocab.txt' ,os.path.join(__UpperCamelCase ,'vocab.txt' ) )
lowercase_ : str = AutoTokenizer.from_pretrained(__UpperCamelCase ,tokenizer_type='bert' )
self.assertIsInstance(__UpperCamelCase ,__UpperCamelCase )
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy('./tests/fixtures/vocab.json' ,os.path.join(__UpperCamelCase ,'vocab.json' ) )
shutil.copy('./tests/fixtures/merges.txt' ,os.path.join(__UpperCamelCase ,'merges.txt' ) )
lowercase_ : Dict = AutoTokenizer.from_pretrained(__UpperCamelCase ,tokenizer_type='gpt2' )
self.assertIsInstance(__UpperCamelCase ,__UpperCamelCase )
def _UpperCAmelCase ( self ) -> Tuple:
'''simple docstring'''
with pytest.raises(__UpperCamelCase ):
AutoTokenizer.from_pretrained('./' ,tokenizer_type='xxx' )
@require_tokenizers
def _UpperCAmelCase ( self ) -> Optional[int]:
'''simple docstring'''
for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]:
lowercase_ : Optional[Any] = tokenizer_class.from_pretrained('wietsedv/bert-base-dutch-cased' )
self.assertIsInstance(__UpperCamelCase ,(BertTokenizer, BertTokenizerFast) )
if isinstance(__UpperCamelCase ,__UpperCamelCase ):
self.assertEqual(tokenizer.basic_tokenizer.do_lower_case ,__UpperCamelCase )
else:
self.assertEqual(tokenizer.do_lower_case ,__UpperCamelCase )
self.assertEqual(tokenizer.model_max_length ,512 )
@require_tokenizers
def _UpperCAmelCase ( self ) -> int:
'''simple docstring'''
for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]:
with self.assertRaisesRegex(
__UpperCamelCase ,'julien-c/herlolip-not-exists is not a local folder and is not a valid model identifier' ,):
lowercase_ : Optional[Any] = tokenizer_class.from_pretrained('julien-c/herlolip-not-exists' )
def _UpperCAmelCase ( self ) -> List[Any]:
'''simple docstring'''
lowercase_ : Any = TOKENIZER_MAPPING.values()
lowercase_ : Union[str, Any] = []
for slow_tok, fast_tok in tokenizers:
if slow_tok is not None:
tokenizer_names.append(slow_tok.__name__ )
if fast_tok is not None:
tokenizer_names.append(fast_tok.__name__ )
for tokenizer_name in tokenizer_names:
# must find the right class
tokenizer_class_from_name(__UpperCamelCase )
@require_tokenizers
def _UpperCAmelCase ( self ) -> Tuple:
'''simple docstring'''
self.assertIsInstance(AutoTokenizer.from_pretrained('bert-base-cased' ,use_fast=__UpperCamelCase ) ,__UpperCamelCase )
self.assertIsInstance(AutoTokenizer.from_pretrained('bert-base-cased' ) ,__UpperCamelCase )
@require_tokenizers
def _UpperCAmelCase ( self ) -> Dict:
'''simple docstring'''
lowercase_ : str = AutoTokenizer.from_pretrained('distilbert-base-uncased' ,do_lower_case=__UpperCamelCase )
lowercase_ : Optional[int] = 'Hello, world. How are you?'
lowercase_ : Optional[int] = tokenizer.tokenize(__UpperCamelCase )
self.assertEqual('[UNK]' ,tokens[0] )
lowercase_ : List[Any] = AutoTokenizer.from_pretrained('microsoft/mpnet-base' ,do_lower_case=__UpperCamelCase )
lowercase_ : Tuple = tokenizer.tokenize(__UpperCamelCase )
self.assertEqual('[UNK]' ,tokens[0] )
@require_tokenizers
def _UpperCAmelCase ( self ) -> Dict:
'''simple docstring'''
lowercase_ : List[Any] = AutoTokenizer.from_pretrained('robot-test/dummy-tokenizer-fast-with-model-config' )
self.assertEqual(type(__UpperCamelCase ) ,__UpperCamelCase )
self.assertEqual(tokenizer.model_max_length ,512 )
self.assertEqual(tokenizer.vocab_size ,3_0000 )
self.assertEqual(tokenizer.unk_token ,'[UNK]' )
self.assertEqual(tokenizer.padding_side ,'right' )
self.assertEqual(tokenizer.truncation_side ,'right' )
def _UpperCAmelCase ( self ) -> Union[str, Any]:
'''simple docstring'''
lowercase_ : Optional[int] = AutoTokenizer.from_pretrained(__UpperCamelCase )
self.assertIsInstance(__UpperCamelCase ,(BertTokenizer, BertTokenizerFast) )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(__UpperCamelCase )
lowercase_ : List[str] = AutoTokenizer.from_pretrained(__UpperCamelCase )
self.assertIsInstance(__UpperCamelCase ,tokenizer.__class__ )
self.assertEqual(tokenizera.vocab_size ,12 )
def _UpperCAmelCase ( self ) -> str:
'''simple docstring'''
lowercase_ : List[str] = AutoTokenizer.from_pretrained('ctrl' )
# There is no fast CTRL so this always gives us a slow tokenizer.
self.assertIsInstance(__UpperCamelCase ,__UpperCamelCase )
def _UpperCAmelCase ( self ) -> str:
'''simple docstring'''
lowercase_ : Union[str, Any] = get_tokenizer_config('bert-base-cased' )
lowercase_ : Union[str, Any] = config.pop('_commit_hash' ,__UpperCamelCase )
# If we ever update bert-base-cased tokenizer config, this dict here will need to be updated.
self.assertEqual(__UpperCamelCase ,{'do_lower_case': False} )
# This model does not have a tokenizer_config so we get back an empty dict.
lowercase_ : Any = get_tokenizer_config(__UpperCamelCase )
self.assertDictEqual(__UpperCamelCase ,{} )
# A tokenizer saved with `save_pretrained` always creates a tokenizer config.
lowercase_ : List[Any] = AutoTokenizer.from_pretrained(__UpperCamelCase )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(__UpperCamelCase )
lowercase_ : int = get_tokenizer_config(__UpperCamelCase )
# Check the class of the tokenizer was properly saved (note that it always saves the slow class).
self.assertEqual(config['tokenizer_class'] ,'BertTokenizer' )
def _UpperCAmelCase ( self ) -> Optional[int]:
'''simple docstring'''
try:
AutoConfig.register('custom' ,__UpperCamelCase )
AutoTokenizer.register(__UpperCamelCase ,slow_tokenizer_class=__UpperCamelCase )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(__UpperCamelCase ):
AutoTokenizer.register(__UpperCamelCase ,slow_tokenizer_class=__UpperCamelCase )
lowercase_ : List[str] = CustomTokenizer.from_pretrained(__UpperCamelCase )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(__UpperCamelCase )
lowercase_ : Tuple = AutoTokenizer.from_pretrained(__UpperCamelCase )
self.assertIsInstance(__UpperCamelCase ,__UpperCamelCase )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
@require_tokenizers
def _UpperCAmelCase ( self ) -> Optional[int]:
'''simple docstring'''
try:
AutoConfig.register('custom' ,__UpperCamelCase )
# Can register in two steps
AutoTokenizer.register(__UpperCamelCase ,slow_tokenizer_class=__UpperCamelCase )
self.assertEqual(TOKENIZER_MAPPING[CustomConfig] ,(CustomTokenizer, None) )
AutoTokenizer.register(__UpperCamelCase ,fast_tokenizer_class=__UpperCamelCase )
self.assertEqual(TOKENIZER_MAPPING[CustomConfig] ,(CustomTokenizer, CustomTokenizerFast) )
del TOKENIZER_MAPPING._extra_content[CustomConfig]
# Can register in one step
AutoTokenizer.register(
__UpperCamelCase ,slow_tokenizer_class=__UpperCamelCase ,fast_tokenizer_class=__UpperCamelCase )
self.assertEqual(TOKENIZER_MAPPING[CustomConfig] ,(CustomTokenizer, CustomTokenizerFast) )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(__UpperCamelCase ):
AutoTokenizer.register(__UpperCamelCase ,fast_tokenizer_class=__UpperCamelCase )
# We pass through a bert fast tokenizer because there is no slow-to-fast converter for our new tokenizer
# and that model does not have a tokenizer.json
with tempfile.TemporaryDirectory() as tmp_dir:
lowercase_ : List[str] = BertTokenizerFast.from_pretrained(__UpperCamelCase )
bert_tokenizer.save_pretrained(__UpperCamelCase )
lowercase_ : Dict = CustomTokenizerFast.from_pretrained(__UpperCamelCase )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(__UpperCamelCase )
lowercase_ : Any = AutoTokenizer.from_pretrained(__UpperCamelCase )
self.assertIsInstance(__UpperCamelCase ,__UpperCamelCase )
lowercase_ : Any = AutoTokenizer.from_pretrained(__UpperCamelCase ,use_fast=__UpperCamelCase )
self.assertIsInstance(__UpperCamelCase ,__UpperCamelCase )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
def _UpperCAmelCase ( self ) -> Optional[int]:
'''simple docstring'''
with self.assertRaises(__UpperCamelCase ):
lowercase_ : Union[str, Any] = AutoTokenizer.from_pretrained('hf-internal-testing/test_dynamic_tokenizer' )
# If remote code is disabled, we can't load this config.
with self.assertRaises(__UpperCamelCase ):
lowercase_ : Tuple = AutoTokenizer.from_pretrained(
'hf-internal-testing/test_dynamic_tokenizer' ,trust_remote_code=__UpperCamelCase )
lowercase_ : int = AutoTokenizer.from_pretrained('hf-internal-testing/test_dynamic_tokenizer' ,trust_remote_code=__UpperCamelCase )
self.assertTrue(tokenizer.special_attribute_present )
# Test tokenizer can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(__UpperCamelCase )
lowercase_ : int = AutoTokenizer.from_pretrained(__UpperCamelCase ,trust_remote_code=__UpperCamelCase )
self.assertTrue(reloaded_tokenizer.special_attribute_present )
if is_tokenizers_available():
self.assertEqual(tokenizer.__class__.__name__ ,'NewTokenizerFast' )
self.assertEqual(reloaded_tokenizer.__class__.__name__ ,'NewTokenizerFast' )
# Test we can also load the slow version
lowercase_ : int = AutoTokenizer.from_pretrained(
'hf-internal-testing/test_dynamic_tokenizer' ,trust_remote_code=__UpperCamelCase ,use_fast=__UpperCamelCase )
self.assertTrue(tokenizer.special_attribute_present )
self.assertEqual(tokenizer.__class__.__name__ ,'NewTokenizer' )
# Test tokenizer can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(__UpperCamelCase )
lowercase_ : Optional[Any] = AutoTokenizer.from_pretrained(__UpperCamelCase ,trust_remote_code=__UpperCamelCase ,use_fast=__UpperCamelCase )
self.assertEqual(reloaded_tokenizer.__class__.__name__ ,'NewTokenizer' )
self.assertTrue(reloaded_tokenizer.special_attribute_present )
else:
self.assertEqual(tokenizer.__class__.__name__ ,'NewTokenizer' )
self.assertEqual(reloaded_tokenizer.__class__.__name__ ,'NewTokenizer' )
@require_tokenizers
def _UpperCAmelCase ( self ) -> str:
'''simple docstring'''
class UpperCamelCase ( lowercase_ ):
lowercase = False
class UpperCamelCase ( lowercase_ ):
lowercase = NewTokenizer
lowercase = False
try:
AutoConfig.register('custom' ,__UpperCamelCase )
AutoTokenizer.register(__UpperCamelCase ,slow_tokenizer_class=__UpperCamelCase )
AutoTokenizer.register(__UpperCamelCase ,fast_tokenizer_class=__UpperCamelCase )
# If remote code is not set, the default is to use local
lowercase_ : List[str] = AutoTokenizer.from_pretrained('hf-internal-testing/test_dynamic_tokenizer' )
self.assertEqual(tokenizer.__class__.__name__ ,'NewTokenizerFast' )
self.assertFalse(tokenizer.special_attribute_present )
lowercase_ : str = AutoTokenizer.from_pretrained('hf-internal-testing/test_dynamic_tokenizer' ,use_fast=__UpperCamelCase )
self.assertEqual(tokenizer.__class__.__name__ ,'NewTokenizer' )
self.assertFalse(tokenizer.special_attribute_present )
# If remote code is disabled, we load the local one.
lowercase_ : List[str] = AutoTokenizer.from_pretrained(
'hf-internal-testing/test_dynamic_tokenizer' ,trust_remote_code=__UpperCamelCase )
self.assertEqual(tokenizer.__class__.__name__ ,'NewTokenizerFast' )
self.assertFalse(tokenizer.special_attribute_present )
lowercase_ : Optional[int] = AutoTokenizer.from_pretrained(
'hf-internal-testing/test_dynamic_tokenizer' ,trust_remote_code=__UpperCamelCase ,use_fast=__UpperCamelCase )
self.assertEqual(tokenizer.__class__.__name__ ,'NewTokenizer' )
self.assertFalse(tokenizer.special_attribute_present )
# If remote is enabled, we load from the Hub
lowercase_ : Optional[Any] = AutoTokenizer.from_pretrained(
'hf-internal-testing/test_dynamic_tokenizer' ,trust_remote_code=__UpperCamelCase )
self.assertEqual(tokenizer.__class__.__name__ ,'NewTokenizerFast' )
self.assertTrue(tokenizer.special_attribute_present )
lowercase_ : Optional[int] = AutoTokenizer.from_pretrained(
'hf-internal-testing/test_dynamic_tokenizer' ,trust_remote_code=__UpperCamelCase ,use_fast=__UpperCamelCase )
self.assertEqual(tokenizer.__class__.__name__ ,'NewTokenizer' )
self.assertTrue(tokenizer.special_attribute_present )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
def _UpperCAmelCase ( self ) -> List[Any]:
'''simple docstring'''
lowercase_ : List[str] = AutoTokenizer.from_pretrained(
'hf-internal-testing/test_dynamic_tokenizer_legacy' ,trust_remote_code=__UpperCamelCase )
self.assertTrue(tokenizer.special_attribute_present )
if is_tokenizers_available():
self.assertEqual(tokenizer.__class__.__name__ ,'NewTokenizerFast' )
# Test we can also load the slow version
lowercase_ : Tuple = AutoTokenizer.from_pretrained(
'hf-internal-testing/test_dynamic_tokenizer_legacy' ,trust_remote_code=__UpperCamelCase ,use_fast=__UpperCamelCase )
self.assertTrue(tokenizer.special_attribute_present )
self.assertEqual(tokenizer.__class__.__name__ ,'NewTokenizer' )
else:
self.assertEqual(tokenizer.__class__.__name__ ,'NewTokenizer' )
def _UpperCAmelCase ( self ) -> int:
'''simple docstring'''
with self.assertRaisesRegex(
__UpperCamelCase ,'bert-base is not a local folder and is not a valid model identifier' ):
lowercase_ : List[Any] = AutoTokenizer.from_pretrained('bert-base' )
def _UpperCAmelCase ( self ) -> Tuple:
'''simple docstring'''
with self.assertRaisesRegex(
__UpperCamelCase ,r'aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)' ):
lowercase_ : Optional[int] = AutoTokenizer.from_pretrained(__UpperCamelCase ,revision='aaaaaa' )
def _UpperCAmelCase ( self ) -> List[Any]:
'''simple docstring'''
lowercase_ : Tuple = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-bert' )
with RequestCounter() as counter:
lowercase_ : List[Any] = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-bert' )
self.assertEqual(counter.get_request_count ,0 )
self.assertEqual(counter.head_request_count ,1 )
self.assertEqual(counter.other_request_count ,0 )
| 425
| 1
|
"""simple docstring"""
from math import sqrt
import numpy as np
from sympy import symbols
# Coefficient
# Speed of light (m/s)
c = 299792458

# Symbols
ct, x, y, z = symbols("ct x y z")


def beta(velocity: float) -> float:
    """Ratio between a velocity and the speed of light."""
    if velocity > c:
        raise ValueError("Speed must not exceed light speed 299,792,458 [m/s]!")
    elif velocity < 1:
        # Usually the speed should be much higher than 1 (c order of magnitude)
        raise ValueError("Speed must be greater than or equal to 1!")
    return velocity / c


def gamma(velocity: float) -> float:
    """Lorentz factor for a given velocity."""
    return 1 / sqrt(1 - beta(velocity) ** 2)


def transformation_matrix(velocity: float) -> np.ndarray:
    """Lorentz transformation matrix for a boost along the x-axis."""
    return np.array(
        [
            [gamma(velocity), -gamma(velocity) * beta(velocity), 0, 0],
            [-gamma(velocity) * beta(velocity), gamma(velocity), 0, 0],
            [0, 0, 1, 0],
            [0, 0, 0, 1],
        ]
    )


def transform(velocity: float, event=None) -> np.ndarray:
    """Apply the Lorentz transform to a four-vector (symbolic by default)."""
    if event is None:
        event = np.array([ct, x, y, z])  # Symbolic four vector
    else:
        event[0] *= c  # x0 is ct (speed of light * time)
    return transformation_matrix(velocity) @ event


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    # Example of symbolic vector:
    four_vector = transform(29979245)
    print("Example of four vector: ")
    print(f"ct' = {four_vector[0]}")
    print(f"x' = {four_vector[1]}")
    print(f"y' = {four_vector[2]}")
    print(f"z' = {four_vector[3]}")

    # Substitute symbols with numerical values
    sub_dict = {ct: c, x: 1, y: 1, z: 1}
    numerical_vector = [four_vector[i].subs(sub_dict) for i in range(4)]
    print(f"\n{numerical_vector}")
| 133
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

SWIN2SR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "caidas/swin2sr-classicalsr-x2-64": (
        "https://huggingface.co/caidas/swin2sr-classicalsr-x2-64/resolve/main/config.json"
    ),
}


class Swin2SRConfig(PretrainedConfig):
    model_type = "swin2sr"
    attribute_map = {
        "hidden_size": "embed_dim",
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        image_size=64,
        patch_size=1,
        num_channels=3,
        embed_dim=180,
        depths=[6, 6, 6, 6, 6, 6],
        num_heads=[6, 6, 6, 6, 6, 6],
        window_size=8,
        mlp_ratio=2.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        upscale=2,
        img_range=1.0,
        resi_connection="1conv",
        upsampler="pixelshuffle",
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.upscale = upscale
        self.img_range = img_range
        self.resi_connection = resi_connection
        self.upsampler = upsampler
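
# --- Usage sketch (illustrative values): a 4x super-resolution variant ---
if __name__ == "__main__":
    config = Swin2SRConfig(upscale=4)
    # attribute_map lets hidden_size resolve to embed_dim
    print(config.num_layers, config.hidden_size)  # 6 180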
| 133
| 1
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
logger = logging.get_logger(__name__)


class MaskFormerSwinConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = "maskformer-swin"
    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        image_size=224,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        depths=[2, 2, 6, 2],
        num_heads=[3, 6, 12, 24],
        window_size=7,
        mlp_ratio=4.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
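
# --- Usage sketch: the backbone mixin aligns out_features with out_indices ---
if __name__ == "__main__":
    config = MaskFormerSwinConfig(out_features=["stage3", "stage4"])
    print(config.out_features, config.out_indices)  # the indices match the stage positions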
| 632
|
import faiss # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy # noqa: F401 # Here to have a nice missing dependency error message early on
import requests # noqa: F401 # Here to have a nice missing dependency error message early on
import sklearn # noqa: F401 # Here to have a nice missing dependency error message early on
import tqdm # noqa: F401 # Here to have a nice missing dependency error message early on
from mauve import compute_mauve # From: mauve-text
import datasets
_CITATION = """\
@inproceedings{pillutla-etal:mauve:neurips2021,
title={MAUVE: Measuring the Gap Between Neural Text and Human Text using Divergence Frontiers},
author={Pillutla, Krishna and Swayamdipta, Swabha and Zellers, Rowan and Thickstun, John and Welleck, Sean and Choi, Yejin and Harchaoui, Zaid},
booktitle = {NeurIPS},
year = {2021}
}
"""
_DESCRIPTION = """\
MAUVE is a library built on PyTorch and HuggingFace Transformers to measure the gap between neural text and human text with the eponymous MAUVE measure.
MAUVE summarizes both Type I and Type II errors measured softly using Kullback–Leibler (KL) divergences.
For details, see the MAUVE paper: https://arxiv.org/abs/2102.01454 (Neurips, 2021).
This metrics is a wrapper around the official implementation of MAUVE:
https://github.com/krishnap25/mauve
"""
_KWARGS_DESCRIPTION = """
Calculates MAUVE scores between two lists of generated text and reference text.
Args:
predictions: list of generated text to score. Each predictions
should be a string with tokens separated by spaces.
references: list of reference for each prediction. Each
reference should be a string with tokens separated by spaces.
Optional Args:
num_buckets: the size of the histogram to quantize P and Q. Options: 'auto' (default) or an integer
pca_max_data: the number data points to use for PCA dimensionality reduction prior to clustering. If -1, use all the data. Default -1
kmeans_explained_var: amount of variance of the data to keep in dimensionality reduction by PCA. Default 0.9
kmeans_num_redo: number of times to redo k-means clustering (the best objective is kept). Default 5
kmeans_max_iter: maximum number of k-means iterations. Default 500
featurize_model_name: name of the model from which features are obtained. Default 'gpt2-large' Use one of ['gpt2', 'gpt2-medium', 'gpt2-large', 'gpt2-xl'].
device_id: Device for featurization. Supply a GPU id (e.g. 0 or 3) to use GPU. If no GPU with this id is found, use CPU
max_text_length: maximum number of tokens to consider. Default 1024
divergence_curve_discretization_size: Number of points to consider on the divergence curve. Default 25
mauve_scaling_factor: \"c\" from the paper. Default 5.
verbose: If True (default), print running time updates
seed: random seed to initialize k-means cluster assignments.
Returns:
mauve: MAUVE score, a number between 0 and 1. Larger values indicate that P and Q are closer,
frontier_integral: Frontier Integral, a number between 0 and 1. Smaller values indicate that P and Q are closer,
divergence_curve: a numpy.ndarray of shape (m, 2); plot it with matplotlib to view the divergence curve,
p_hist: a discrete distribution, which is a quantized version of the text distribution p_text,
q_hist: same as above, but with q_text.
Examples:
>>> # faiss segfaults in doctest for some reason, so the .compute call is not tested with doctest
>>> import datasets
>>> mauve = datasets.load_metric('mauve')
>>> predictions = [\"hello there\", \"general kenobi\"]
>>> references = [\"hello there\", \"general kenobi\"]
>>> out = mauve.compute(predictions=predictions, references=references) # doctest: +SKIP
>>> print(out.mauve) # doctest: +SKIP
1.0
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Mauve(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            homepage="https://github.com/krishnap25/mauve",
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            codebase_urls=["https://github.com/krishnap25/mauve"],
            reference_urls=[
                "https://arxiv.org/abs/2102.01454",
                "https://github.com/krishnap25/mauve",
            ],
        )

    def _compute(
        self,
        predictions,
        references,
        p_features=None,
        q_features=None,
        p_tokens=None,
        q_tokens=None,
        num_buckets="auto",
        pca_max_data=-1,
        kmeans_explained_var=0.9,
        kmeans_num_redo=5,
        kmeans_max_iter=500,
        featurize_model_name="gpt2-large",
        device_id=-1,
        max_text_length=1024,
        divergence_curve_discretization_size=25,
        mauve_scaling_factor=5,
        verbose=True,
        seed=25,
    ):
        out = compute_mauve(
            p_text=predictions,
            q_text=references,
            p_features=p_features,
            q_features=q_features,
            p_tokens=p_tokens,
            q_tokens=q_tokens,
            num_buckets=num_buckets,
            pca_max_data=pca_max_data,
            kmeans_explained_var=kmeans_explained_var,
            kmeans_num_redo=kmeans_num_redo,
            kmeans_max_iter=kmeans_max_iter,
            featurize_model_name=featurize_model_name,
            device_id=device_id,
            max_text_length=max_text_length,
            divergence_curve_discretization_size=divergence_curve_discretization_size,
            mauve_scaling_factor=mauve_scaling_factor,
            verbose=verbose,
            seed=seed,
        )
        return out
| 297
| 0
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
    "configuration_xlm": ["XLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "XLMConfig", "XLMOnnxConfig"],
    "tokenization_xlm": ["XLMTokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_xlm"] = [
        "XLM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "XLMForMultipleChoice",
        "XLMForQuestionAnswering",
        "XLMForQuestionAnsweringSimple",
        "XLMForSequenceClassification",
        "XLMForTokenClassification",
        "XLMModel",
        "XLMPreTrainedModel",
        "XLMWithLMHeadModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_xlm"] = [
        "TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFXLMForMultipleChoice",
        "TFXLMForQuestionAnsweringSimple",
        "TFXLMForSequenceClassification",
        "TFXLMForTokenClassification",
        "TFXLMMainLayer",
        "TFXLMModel",
        "TFXLMPreTrainedModel",
        "TFXLMWithLMHeadModel",
    ]

if TYPE_CHECKING:
    from .configuration_xlm import XLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMConfig, XLMOnnxConfig
    from .tokenization_xlm import XLMTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_xlm import (
            XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
            XLMForMultipleChoice,
            XLMForQuestionAnswering,
            XLMForQuestionAnsweringSimple,
            XLMForSequenceClassification,
            XLMForTokenClassification,
            XLMModel,
            XLMPreTrainedModel,
            XLMWithLMHeadModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_xlm import (
            TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFXLMForMultipleChoice,
            TFXLMForQuestionAnsweringSimple,
            TFXLMForSequenceClassification,
            TFXLMForTokenClassification,
            TFXLMMainLayer,
            TFXLMModel,
            TFXLMPreTrainedModel,
            TFXLMWithLMHeadModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
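
# --- Sketch (assumes transformers is installed): the heavy torch/TF-backed
# classes above are only imported on first attribute access. ---
if __name__ == "__main__":
    import importlib

    xlm = importlib.import_module("transformers.models.xlm")
    print(type(xlm).__name__)  # _LazyModule
    _ = xlm.XLMConfig  # the real submodule import happens here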
| 707
|
import csv
import tweepy
# Twitter API credentials
lowerCAmelCase_ = """"""
lowerCAmelCase_ = """"""
lowerCAmelCase_ = """"""
lowerCAmelCase_ = """"""
def get_all_tweets(screen_name: str) -> None:
    # authorize twitter, initialize tweepy
    auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
    auth.set_access_token(access_key, access_secret)
    api = tweepy.API(auth)

    # initialize a list to hold all the tweepy Tweets
    alltweets = []

    # make initial request for most recent tweets (200 is the maximum allowed count)
    new_tweets = api.user_timeline(screen_name=screen_name, count=200)
    # save most recent tweets
    alltweets.extend(new_tweets)
    # save the id of the oldest tweet less one
    oldest = alltweets[-1].id - 1

    # keep grabbing tweets until there are no tweets left to grab
    while len(new_tweets) > 0:
        print(f"getting tweets before {oldest}")
        # all subsequent requests use the max_id param to prevent duplicates
        new_tweets = api.user_timeline(screen_name=screen_name, count=200, max_id=oldest)
        # save most recent tweets
        alltweets.extend(new_tweets)
        # update the id of the oldest tweet less one
        oldest = alltweets[-1].id - 1
        print(f"...{len(alltweets)} tweets downloaded so far")

    # transform the tweepy tweets into a 2D array that will populate the csv
    outtweets = [[tweet.id_str, tweet.created_at, tweet.text] for tweet in alltweets]

    # write the csv
    with open(f"new_{screen_name}_tweets.csv", "w") as f:
        writer = csv.writer(f)
        writer.writerow(["id", "created_at", "text"])
        writer.writerows(outtweets)
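
# --- Alternative sketch (hypothetical helper, not in the original script):
# tweepy.Cursor does the max_id bookkeeping; the timeline is still capped
# at roughly the most recent 3200 tweets. ---
def get_all_tweets_with_cursor(api: tweepy.API, screen_name: str) -> list:
    return [
        [tweet.id_str, tweet.created_at, tweet.text]
        for tweet in tweepy.Cursor(api.user_timeline, screen_name=screen_name, count=200).items()
    ]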
if __name__ == "__main__":
# pass in the username of the account you want to download
get_all_tweets("""FirePing32""")
| 669
| 0
|
import copy
from dataclasses import dataclass
from pathlib import Path
from typing import Dict, Optional, Union
@dataclass
class A :
'''simple docstring'''
A__ = None
A__ = False
A__ = False
A__ = False
A__ = None
A__ = None
A__ = False
A__ = False
A__ = False
A__ = True
A__ = None
A__ = 1
A__ = None
A__ = False
A__ = None
A__ = None
    def copy(self) -> "A":
        # Rebuild the same class from a deep copy of every field.
        return self.__class__(**{k: copy.deepcopy(v) for k, v in self.__dict__.items()})
| 15
|
'''simple docstring'''
def apply_table(inp, table):
    """Apply the given permutation table to the input bit-string."""
    res = ""
    for i in table:
        res += inp[i - 1]
    return res


def left_shift(data):
    """Circular left shift by one position."""
    return data[1:] + data[0]


def xor(a, b):
    """Bitwise XOR of two equal-length bit-strings."""
    res = ""
    for i in range(len(a)):
        if a[i] == b[i]:
            res += "0"
        else:
            res += "1"
    return res


def apply_sbox(s, data):
    """Look up a 2-bit output in S-box `s` using the outer bits as row, inner bits as column."""
    row = int("0b" + data[0] + data[-1], 2)
    col = int("0b" + data[1:3], 2)
    return bin(s[row][col])[2:]


def function(expansion, s0, s1, key, message):
    """One Feistel round of simplified DES."""
    left = message[:4]
    right = message[4:]
    temp = apply_table(right, expansion)
    temp = xor(temp, key)
    l = apply_sbox(s0, temp[:4])  # noqa: E741
    r = apply_sbox(s1, temp[4:])
    l = "0" * (2 - len(l)) + l  # noqa: E741
    r = "0" * (2 - len(r)) + r
    temp = apply_table(l + r, p4_table)
    temp = xor(left, temp)
    return temp + right
if __name__ == "__main__":
snake_case_ : List[Any] = input('Enter 10 bit key: ')
snake_case_ : Union[str, Any] = input('Enter 8 bit message: ')
snake_case_ : List[Any] = [6, 3, 7, 4, 8, 5, 10, 9]
snake_case_ : Dict = [3, 5, 2, 7, 4, 10, 1, 9, 8, 6]
snake_case_ : Tuple = [2, 4, 3, 1]
snake_case_ : List[str] = [2, 6, 3, 1, 4, 8, 5, 7]
snake_case_ : Optional[int] = [4, 1, 3, 5, 7, 2, 8, 6]
snake_case_ : str = [4, 1, 2, 3, 2, 3, 4, 1]
snake_case_ : Optional[int] = [[1, 0, 3, 2], [3, 2, 1, 0], [0, 2, 1, 3], [3, 1, 3, 2]]
snake_case_ : int = [[0, 1, 2, 3], [2, 0, 1, 3], [3, 0, 1, 0], [2, 1, 0, 3]]
# key generation
snake_case_ : Union[str, Any] = apply_table(key, paa_table)
snake_case_ : Optional[int] = temp[:5]
snake_case_ : str = temp[5:]
snake_case_ : str = left_shift(left)
snake_case_ : Dict = left_shift(right)
snake_case_ : List[Any] = apply_table(left + right, pa_table)
snake_case_ : Union[str, Any] = left_shift(left)
snake_case_ : Union[str, Any] = left_shift(right)
snake_case_ : str = left_shift(left)
snake_case_ : Tuple = left_shift(right)
snake_case_ : List[str] = apply_table(left + right, pa_table)
# encryption
snake_case_ : Any = apply_table(message, IP)
snake_case_ : Union[str, Any] = function(expansion, sa, sa, keya, temp)
snake_case_ : int = temp[4:] + temp[:4]
snake_case_ : List[str] = function(expansion, sa, sa, keya, temp)
snake_case_ : Dict = apply_table(temp, IP_inv)
print('Cipher text is:', CT)
# decryption
snake_case_ : List[Any] = apply_table(CT, IP)
snake_case_ : int = function(expansion, sa, sa, keya, temp)
snake_case_ : List[Any] = temp[4:] + temp[:4]
snake_case_ : int = function(expansion, sa, sa, keya, temp)
snake_case_ : Tuple = apply_table(temp, IP_inv)
print('Plain text after decypting is:', PT)
| 212
| 0
|
"""simple docstring"""
import inspect
import unittest
from transformers import MobileNetVaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileNetVaForImageClassification, MobileNetVaForSemanticSegmentation, MobileNetVaModel
from transformers.models.mobilenet_va.modeling_mobilenet_va import MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import MobileNetVaImageProcessor
class MobileNetVaConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "tf_padding"))
        self.parent.assertTrue(hasattr(config, "depth_multiplier"))
class MobileNetVaModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        num_channels=3,
        image_size=32,
        depth_multiplier=0.25,
        depth_divisible_by=8,
        min_depth=8,
        expand_ratio=6,
        output_stride=32,
        first_layer_is_expansion=True,
        finegrained_output=True,
        tf_padding=True,
        hidden_act="relu6",
        last_hidden_size=1280,
        classifier_dropout_prob=0.1,
        initializer_range=0.02,
        is_training=True,
        use_labels=True,
        num_labels=10,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.depth_multiplier = depth_multiplier
        self.depth_divisible_by = depth_divisible_by
        self.min_depth = min_depth
        self.expand_ratio = expand_ratio
        self.tf_padding = tf_padding
        self.output_stride = output_stride
        self.first_layer_is_expansion = first_layer_is_expansion
        self.finegrained_output = finegrained_output
        self.hidden_act = hidden_act
        self.last_hidden_size = last_hidden_size if finegrained_output else int(last_hidden_size * depth_multiplier)
        self.classifier_dropout_prob = classifier_dropout_prob
        self.use_labels = use_labels
        self.is_training = is_training
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.scope = scope
def lowerCAmelCase_ ( self: List[str] ) -> Dict:
snake_case_ :Optional[int] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
snake_case_ :int = None
snake_case_ :List[str] = None
if self.use_labels:
snake_case_ :List[str] = ids_tensor([self.batch_size] , self.num_labels )
snake_case_ :Union[str, Any] = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
snake_case_ :Tuple = self.get_config()
return config, pixel_values, labels, pixel_labels
def lowerCAmelCase_ ( self: Dict ) -> int:
return MobileNetVaConfig(
num_channels=self.num_channels , image_size=self.image_size , depth_multiplier=self.depth_multiplier , depth_divisible_by=self.depth_divisible_by , min_depth=self.min_depth , expand_ratio=self.expand_ratio , output_stride=self.output_stride , first_layer_is_expansion=self.first_layer_is_expansion , finegrained_output=self.finegrained_output , hidden_act=self.hidden_act , tf_padding=self.tf_padding , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , )
def lowerCAmelCase_ ( self: Any , snake_case: List[str] , snake_case: Optional[Any] , snake_case: str , snake_case: str ) -> List[str]:
snake_case_ :int = MobileNetVaModel(config=snake_case )
model.to(snake_case )
model.eval()
snake_case_ :Any = model(snake_case )
self.parent.assertEqual(
result.last_hidden_state.shape , (
self.batch_size,
self.last_hidden_size,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
self.parent.assertEqual(
result.pooler_output.shape , (self.batch_size, self.last_hidden_size) , )
def lowerCAmelCase_ ( self: Dict , snake_case: Dict , snake_case: Tuple , snake_case: Dict , snake_case: int ) -> str:
snake_case_ :List[str] = self.num_labels
snake_case_ :Optional[Any] = MobileNetVaForImageClassification(snake_case )
model.to(snake_case )
model.eval()
snake_case_ :List[str] = model(snake_case , labels=snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowerCAmelCase_ ( self: List[Any] , snake_case: Union[str, Any] , snake_case: List[Any] , snake_case: Tuple , snake_case: Any ) -> Optional[Any]:
snake_case_ :Tuple = self.num_labels
snake_case_ :Optional[Any] = MobileNetVaForSemanticSegmentation(snake_case )
model.to(snake_case )
model.eval()
snake_case_ :List[str] = model(snake_case )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
snake_case_ :Optional[Any] = model(snake_case , labels=snake_case )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def lowerCAmelCase_ ( self: Optional[Any] ) -> List[Any]:
snake_case_ :Tuple = self.prepare_config_and_inputs()
snake_case_ :int = config_and_inputs
snake_case_ :List[Any] = {"""pixel_values""": pixel_values}
return config, inputs_dict
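# Note (added for clarity): with the defaults above, image_size=32 and
# output_stride=8, so the spatial dims checked in the model test above are
# 32 // 8 = 4, i.e. last_hidden_state has shape (batch_size, last_hidden_size, 4, 4).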
@require_torch
class lowerCamelCase ( _lowerCAmelCase , _lowerCAmelCase , unittest.TestCase ):
'''simple docstring'''
_A : Optional[int] = (
(MobileNetVaModel, MobileNetVaForImageClassification, MobileNetVaForSemanticSegmentation)
if is_torch_available()
else ()
)
_A : Tuple = (
{
"""feature-extraction""": MobileNetVaModel,
"""image-classification""": MobileNetVaForImageClassification,
"""image-segmentation""": MobileNetVaForSemanticSegmentation,
}
if is_torch_available()
else {}
)
_A : Any = False
_A : Any = False
_A : Tuple = False
_A : Optional[Any] = False
def lowerCAmelCase_ ( self: Optional[int] ) -> Union[str, Any]:
snake_case_ :Optional[Any] = MobileNetVaModelTester(self )
snake_case_ :Optional[Any] = MobileNetVaConfigTester(self , config_class=snake_case , has_text_modality=snake_case )
def lowerCAmelCase_ ( self: List[str] ) -> List[Any]:
self.config_tester.run_common_tests()
@unittest.skip(reason="""MobileNetV2 does not use inputs_embeds""" )
def lowerCAmelCase_ ( self: Tuple ) -> List[str]:
pass
@unittest.skip(reason="""MobileNetV2 does not support input and output embeddings""" )
def lowerCAmelCase_ ( self: Optional[int] ) -> Union[str, Any]:
pass
@unittest.skip(reason="""MobileNetV2 does not output attentions""" )
def lowerCAmelCase_ ( self: str ) -> Optional[int]:
pass
def lowerCAmelCase_ ( self: List[Any] ) -> List[str]:
snake_case_ :List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case_ :Optional[Any] = model_class(snake_case )
snake_case_ :List[str] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
snake_case_ :int = [*signature.parameters.keys()]
snake_case_ :int = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , snake_case )
def lowerCAmelCase_ ( self: Tuple ) -> Any:
snake_case_ :Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*snake_case )
def lowerCAmelCase_ ( self: Tuple ) -> Optional[int]:
def check_hidden_states_output(snake_case: Any , snake_case: List[str] , snake_case: Optional[int] ):
snake_case_ :Optional[Any] = model_class(snake_case )
model.to(snake_case )
model.eval()
with torch.no_grad():
snake_case_ :Union[str, Any] = model(**self._prepare_for_class(snake_case , snake_case ) )
snake_case_ :Optional[Any] = outputs.hidden_states
snake_case_ :int = 16
self.assertEqual(len(snake_case ) , snake_case )
snake_case_ :Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case_ :Tuple = True
check_hidden_states_output(snake_case , snake_case , snake_case )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
snake_case_ :List[Any] = True
check_hidden_states_output(snake_case , snake_case , snake_case )
def lowerCAmelCase_ ( self: int ) -> Union[str, Any]:
snake_case_ :str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*snake_case )
def lowerCAmelCase_ ( self: Union[str, Any] ) -> Tuple:
snake_case_ :Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*snake_case )
@slow
def lowerCAmelCase_ ( self: Any ) -> int:
for model_name in MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
snake_case_ :Union[str, Any] = MobileNetVaModel.from_pretrained(snake_case )
self.assertIsNotNone(snake_case )
def A_ ( ):
'''simple docstring'''
snake_case_ :Tuple = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
class lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def lowerCAmelCase_ ( self: Any ) -> Any:
return (
MobileNetVaImageProcessor.from_pretrained("""google/mobilenet_v2_1.0_224""" ) if is_vision_available() else None
)
@slow
def lowerCAmelCase_ ( self: int ) -> Tuple:
snake_case_ :List[Any] = MobileNetVaForImageClassification.from_pretrained("""google/mobilenet_v2_1.0_224""" ).to(snake_case )
snake_case_ :List[Any] = self.default_image_processor
snake_case_ :List[str] = prepare_img()
snake_case_ :Optional[Any] = image_processor(images=snake_case , return_tensors="""pt""" ).to(snake_case )
# forward pass
with torch.no_grad():
snake_case_ :List[str] = model(**snake_case )
# verify the logits
snake_case_ :Optional[int] = torch.Size((1, 1_001) )
self.assertEqual(outputs.logits.shape , snake_case )
snake_case_ :int = torch.tensor([0.2_4_4_5, -1.1_9_9_3, 0.1_9_0_5] ).to(snake_case )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , snake_case , atol=1E-4 ) )
@slow
def lowerCAmelCase_ ( self: Tuple ) -> Dict:
snake_case_ :List[str] = MobileNetVaForSemanticSegmentation.from_pretrained("""google/deeplabv3_mobilenet_v2_1.0_513""" )
snake_case_ :Optional[int] = model.to(snake_case )
snake_case_ :Optional[int] = MobileNetVaImageProcessor.from_pretrained("""google/deeplabv3_mobilenet_v2_1.0_513""" )
snake_case_ :str = prepare_img()
snake_case_ :Union[str, Any] = image_processor(images=snake_case , return_tensors="""pt""" ).to(snake_case )
# forward pass
with torch.no_grad():
snake_case_ :Optional[int] = model(**snake_case )
snake_case_ :Optional[Any] = outputs.logits
# verify the logits
snake_case_ :List[str] = torch.Size((1, 21, 65, 65) )
self.assertEqual(logits.shape , snake_case )
snake_case_ :Optional[int] = torch.tensor(
[
[[17.5_790, 17.7_581, 18.3_355], [18.3_257, 18.4_230, 18.8_973], [18.6_169, 18.8_650, 19.2_187]],
[[-2.1_5_9_5, -2.0_9_7_7, -2.3_7_4_1], [-2.4_2_2_6, -2.3_0_2_8, -2.6_8_3_5], [-2.7_8_1_9, -2.5_9_9_1, -2.7_7_0_6]],
[[4.2_0_5_8, 4.8_3_1_7, 4.7_6_3_8], [4.4_1_3_6, 5.0_3_6_1, 4.9_3_8_3], [4.5_0_2_8, 4.9_6_4_4, 4.8_7_3_4]],
] , device=snake_case , )
self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , snake_case , atol=1E-4 ) )
| 703
|
"""simple docstring"""
import contextlib
from multiprocessing import Pool, RLock
from tqdm.auto import tqdm
from ..utils import experimental, logging
__a = logging.get_logger(__name__)
class lowerCamelCase :
'''simple docstring'''
_A : Union[str, Any] = None
@experimental
def A_ ( _lowercase, _lowercase, _lowercase, _lowercase, _lowercase, _lowercase, _lowercase ):
'''simple docstring'''
if ParallelBackendConfig.backend_name is None:
return _map_with_multiprocessing_pool(
_lowercase, _lowercase, _lowercase, _lowercase, _lowercase, _lowercase, _lowercase )
return _map_with_joblib(_lowercase, _lowercase, _lowercase, _lowercase, _lowercase, _lowercase, _lowercase )
def A_ ( _lowercase, _lowercase, _lowercase, _lowercase, _lowercase, _lowercase, _lowercase ):
'''simple docstring'''
snake_case_ :List[str] = num_proc if num_proc <= len(_lowercase ) else len(_lowercase )
    snake_case_ :int = [] # We organize the splits ourselves (contiguous splits)
for index in range(_lowercase ):
snake_case_ :List[str] = len(_lowercase ) // num_proc
snake_case_ :Any = len(_lowercase ) % num_proc
snake_case_ :Optional[int] = div * index + min(_lowercase, _lowercase )
snake_case_ :Union[str, Any] = start + div + (1 if index < mod else 0)
split_kwds.append((function, iterable[start:end], types, index, disable_tqdm, desc) )
if len(_lowercase ) != sum(len(i[1] ) for i in split_kwds ):
raise ValueError(
f"""Error dividing inputs iterable among processes. """
f"""Total number of objects {len(_lowercase )}, """
f"""length: {sum(len(i[1] ) for i in split_kwds )}""" )
logger.info(
f"""Spawning {num_proc} processes for {len(_lowercase )} objects in slices of {[len(i[1] ) for i in split_kwds]}""" )
snake_case_, snake_case_ :Optional[int] = None, None
if not disable_tqdm:
snake_case_, snake_case_ :List[str] = (RLock(),), tqdm.set_lock
with Pool(_lowercase, initargs=_lowercase, initializer=_lowercase ) as pool:
snake_case_ :Optional[Any] = pool.map(_lowercase, _lowercase )
logger.info(f"""Finished {num_proc} processes""" )
snake_case_ :Optional[int] = [obj for proc_res in mapped for obj in proc_res]
logger.info(f"""Unpacked {len(_lowercase )} objects""" )
return mapped
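# Added illustration (not in the original module): a minimal, self-contained
# sketch of the contiguous-split arithmetic used above. The helper name
# `_split_bounds_demo` is hypothetical and the function is never called here.
def _split_bounds_demo():
    items, num_proc = list(range(10)), 3
    div, mod = len(items) // num_proc, len(items) % num_proc
    # start = div * index + min(index, mod); the first `mod` slices get one extra item
    bounds = [
        (div * i + min(i, mod), div * i + min(i, mod) + div + (1 if i < mod else 0))
        for i in range(num_proc)
    ]
    assert bounds == [(0, 4), (4, 7), (7, 10)]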
def A_ ( _lowercase, _lowercase, _lowercase, _lowercase, _lowercase, _lowercase, _lowercase ):
'''simple docstring'''
import joblib
with joblib.parallel_backend(ParallelBackendConfig.backend_name, n_jobs=_lowercase ):
return joblib.Parallel()(
joblib.delayed(_lowercase )((function, obj, types, None, True, None) ) for obj in iterable )
@experimental
@contextlib.contextmanager
def A_ ( _lowercase ):
'''simple docstring'''
snake_case_ :Dict = backend_name
if backend_name == "spark":
from joblibspark import register_spark
register_spark()
# TODO: call create_cache_and_write_probe if "download" in steps
# TODO: raise NotImplementedError when Dataset.map etc is called
try:
yield
finally:
snake_case_ :Optional[int] = None
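# Usage sketch (added; illustrative only). Upstream `datasets` exports this
# context manager as `parallel_backend`, so the snippet is written against that
# public name rather than the obfuscated `A_` above:
#
#     with parallel_backend("spark"):
#         ...  # nested-map work inside the block is dispatched through joblib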
| 310
| 0
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
lowerCAmelCase = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase = ['BartphoTokenizer']
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bartpho import BartphoTokenizer
else:
import sys
lowerCAmelCase = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 43
|
def solution( n = 100 ):
    '''simple docstring'''
    sum_of_squares = 0
    sum_of_ints = 0
for i in range(1 , n + 1 ):
sum_of_squares += i**2
sum_of_ints += i
return sum_of_ints**2 - sum_of_squares
if __name__ == "__main__":
print(f'{solution() = }')
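# Worked check (added): for n = 10 the square of the sum is (10 * 11 // 2) ** 2 == 3025
# and the sum of the squares is 10 * 11 * 21 // 6 == 385, so solution(10) == 2640.
# The closed form (n * (n + 1) // 2) ** 2 - n * (n + 1) * (2 * n + 1) // 6 avoids the loop.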
| 203
| 0
|
"""simple docstring"""
import warnings
from diffusers import StableDiffusionInpaintPipeline as StableDiffusionInpaintPipeline # noqa F401
warnings.warn(
'The `inpainting.py` script is outdated. Please use directly `from diffusers import'
' StableDiffusionInpaintPipeline` instead.'
)
| 363
|
"""simple docstring"""
from pickle import UnpicklingError
import jax
import jax.numpy as jnp
import numpy as np
from flax.serialization import from_bytes
from flax.traverse_util import flatten_dict
from ..utils import logging
_UpperCamelCase = logging.get_logger(__name__)
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : str ):
'''simple docstring'''
try:
with open(SCREAMING_SNAKE_CASE , '''rb''' ) as flax_state_f:
__lowerCamelCase : int =from_bytes(SCREAMING_SNAKE_CASE , flax_state_f.read() )
except UnpicklingError as e:
try:
with open(SCREAMING_SNAKE_CASE ) as f:
if f.read().startswith('''version''' ):
raise OSError(
'''You seem to have cloned a repository without having git-lfs installed. Please'''
''' install git-lfs and run `git lfs install` followed by `git lfs pull` in the'''
''' folder you cloned.''' )
else:
raise ValueError from e
except (UnicodeDecodeError, ValueError):
raise EnvironmentError(F'Unable to convert {model_file} to Flax deserializable object. ' )
return load_flax_weights_in_pytorch_model(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : Union[str, Any] ):
'''simple docstring'''
try:
import torch # noqa: F401
except ImportError:
logger.error(
'''Loading Flax weights in PyTorch requires both PyTorch and Flax to be installed. Please see'''
''' https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation'''
''' instructions.''' )
raise
# check if we have bf16 weights
__lowerCamelCase : str =flatten_dict(jax.tree_util.tree_map(lambda SCREAMING_SNAKE_CASE : x.dtype == jnp.bfloataa , SCREAMING_SNAKE_CASE ) ).values()
if any(SCREAMING_SNAKE_CASE ):
        # convert all weights to fp32 if they are bf16 since torch.from_numpy cannot handle bf16
# and bf16 is not fully supported in PT yet.
logger.warning(
'''Found ``bfloat16`` weights in Flax model. Casting all ``bfloat16`` weights to ``float32`` '''
'''before loading those in PyTorch model.''' )
__lowerCamelCase : List[str] =jax.tree_util.tree_map(
lambda SCREAMING_SNAKE_CASE : params.astype(np.floataa ) if params.dtype == jnp.bfloataa else params , SCREAMING_SNAKE_CASE )
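    # Illustration (added): the tree_map above casts leaf-by-leaf, e.g.
    #     jnp.zeros(3, dtype=jnp.bfloat16).astype(np.float32)
    # yields a float32 array that np.asarray + torch.from_numpy (used below)
    # can consume, since numpy has no bfloat16 dtype for from_numpy to read.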
__lowerCamelCase : int =''''''
__lowerCamelCase : List[str] =flatten_dict(SCREAMING_SNAKE_CASE , sep='''.''' )
__lowerCamelCase : List[str] =pt_model.state_dict()
# keep track of unexpected & missing keys
__lowerCamelCase : Union[str, Any] =[]
__lowerCamelCase : int =set(pt_model_dict.keys() )
for flax_key_tuple, flax_tensor in flax_state_dict.items():
__lowerCamelCase : Tuple =flax_key_tuple.split('''.''' )
if flax_key_tuple_array[-1] == "kernel" and flax_tensor.ndim == 4:
__lowerCamelCase : Optional[int] =flax_key_tuple_array[:-1] + ['''weight''']
__lowerCamelCase : Union[str, Any] =jnp.transpose(SCREAMING_SNAKE_CASE , (3, 2, 0, 1) )
elif flax_key_tuple_array[-1] == "kernel":
__lowerCamelCase : Optional[Any] =flax_key_tuple_array[:-1] + ['''weight''']
__lowerCamelCase : Dict =flax_tensor.T
elif flax_key_tuple_array[-1] == "scale":
__lowerCamelCase : str =flax_key_tuple_array[:-1] + ['''weight''']
if "time_embedding" not in flax_key_tuple_array:
for i, flax_key_tuple_string in enumerate(SCREAMING_SNAKE_CASE ):
__lowerCamelCase : Union[str, Any] =(
flax_key_tuple_string.replace('''_0''' , '''.0''' )
.replace('''_1''' , '''.1''' )
.replace('''_2''' , '''.2''' )
.replace('''_3''' , '''.3''' )
.replace('''_4''' , '''.4''' )
.replace('''_5''' , '''.5''' )
.replace('''_6''' , '''.6''' )
.replace('''_7''' , '''.7''' )
.replace('''_8''' , '''.8''' )
.replace('''_9''' , '''.9''' )
)
__lowerCamelCase : Optional[int] ='''.'''.join(SCREAMING_SNAKE_CASE )
if flax_key in pt_model_dict:
if flax_tensor.shape != pt_model_dict[flax_key].shape:
raise ValueError(
F'Flax checkpoint seems to be incorrect. Weight {flax_key_tuple} was expected '
F'to be of shape {pt_model_dict[flax_key].shape}, but is {flax_tensor.shape}.' )
else:
# add weight to pytorch dict
__lowerCamelCase : Tuple =np.asarray(SCREAMING_SNAKE_CASE ) if not isinstance(SCREAMING_SNAKE_CASE , np.ndarray ) else flax_tensor
__lowerCamelCase : Any =torch.from_numpy(SCREAMING_SNAKE_CASE )
# remove from missing keys
missing_keys.remove(SCREAMING_SNAKE_CASE )
else:
# weight is not expected by PyTorch model
unexpected_keys.append(SCREAMING_SNAKE_CASE )
pt_model.load_state_dict(SCREAMING_SNAKE_CASE )
# re-transform missing_keys to list
__lowerCamelCase : Optional[Any] =list(SCREAMING_SNAKE_CASE )
if len(SCREAMING_SNAKE_CASE ) > 0:
logger.warning(
'''Some weights of the Flax model were not used when initializing the PyTorch model'''
F' {pt_model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are initializing'
F' {pt_model.__class__.__name__} from a Flax model trained on another task or with another architecture'
''' (e.g. initializing a BertForSequenceClassification model from a FlaxBertForPreTraining model).\n- This'''
F' IS NOT expected if you are initializing {pt_model.__class__.__name__} from a Flax model that you expect'
''' to be exactly identical (e.g. initializing a BertForSequenceClassification model from a'''
''' FlaxBertForSequenceClassification model).''' )
if len(SCREAMING_SNAKE_CASE ) > 0:
logger.warning(
F'Some weights of {pt_model.__class__.__name__} were not initialized from the Flax model and are newly'
F' initialized: {missing_keys}\nYou should probably TRAIN this model on a down-stream task to be able to'
''' use it for predictions and inference.''' )
return pt_model
| 363
| 1
|
"""simple docstring"""
def lowercase__ ( hex_num :str ):
    hex_num = hex_num.strip()
    if not hex_num:
        raise ValueError('''No value was passed to the function''' )
    is_negative = hex_num[0] == '''-'''
    if is_negative:
        hex_num = hex_num[1:]
    try:
        int_num = int(hex_num , 16 )
    except ValueError:
        raise ValueError('''Invalid value was passed to the function''' )
    bin_str = ''''''
    while int_num > 0:
        bin_str = str(int_num % 2 ) + bin_str
        int_num >>= 1
    return int(('''-''' + bin_str) if is_negative else bin_str )
if __name__ == "__main__":
import doctest
doctest.testmod()
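# Worked example (added): 'AC' -> int('AC', 16) == 172 -> bits '10101100', so
# the function returns the integer 10101100; '-AC' likewise returns -10101100.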
| 49
|
"""simple docstring"""
def lowercase__ ( graph :dict ):
    visited = set()
    # To detect a back edge, keep track of vertices currently in the recursion stack
    rec_stk = set()
    return any(
        node not in visited and depth_first_search(graph , node , visited , rec_stk )
        for node in graph )
def depth_first_search( graph :dict , vertex :int , visited :set , rec_stk :set ):
    visited.add(vertex )
    rec_stk.add(vertex )
    for node in graph[vertex]:
        if node not in visited:
            if depth_first_search(graph , node , visited , rec_stk ):
                return True
        elif node in rec_stk:
            return True
    # The node needs to be removed from recursion stack before function ends
    rec_stk.remove(vertex )
    return False
if __name__ == "__main__":
from doctest import testmod
testmod()
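# Worked example (added): for graph = {0: [1], 1: [2], 2: [0]} the DFS finds the
# back edge 2 -> 0 while node 0 is still on the recursion stack, so
# lowercase__(graph) returns True; for the DAG {0: [1], 1: [2], 2: []} no node on
# the stack is ever revisited and it returns False.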
| 49
| 1
|
from typing import Tuple, Union
from ...modeling_outputs import BackboneOutput
from ...modeling_utils import PreTrainedModel
from ...utils import is_timm_available, is_torch_available, requires_backends
from ...utils.backbone_utils import BackboneMixin
from .configuration_timm_backbone import TimmBackboneConfig
if is_timm_available():
import timm
if is_torch_available():
from torch import Tensor
class __snake_case ( a , a ):
UpperCAmelCase__ : Optional[Any] = '''pixel_values'''
UpperCAmelCase__ : Dict = False
UpperCAmelCase__ : Tuple = TimmBackboneConfig
def __init__( self : Tuple , _snake_case : List[str] , **_snake_case : Optional[Any]):
"""simple docstring"""
requires_backends(self , '''timm''')
super().__init__(_snake_case)
UpperCAmelCase_ = config
if config.backbone is None:
raise ValueError('''backbone is not set in the config. Please set it to a timm model name.''')
if config.backbone not in timm.list_models():
raise ValueError(F"""backbone {config.backbone} is not supported by timm.""")
if hasattr(_snake_case , '''out_features''') and config.out_features is not None:
raise ValueError('''out_features is not supported by TimmBackbone. Please use out_indices instead.''')
UpperCAmelCase_ = getattr(_snake_case , '''use_pretrained_backbone''' , _snake_case)
if pretrained is None:
raise ValueError('''use_pretrained_backbone is not set in the config. Please set it to True or False.''')
# We just take the final layer by default. This matches the default for the transformers models.
UpperCAmelCase_ = config.out_indices if getattr(_snake_case , '''out_indices''' , _snake_case) is not None else (-1,)
UpperCAmelCase_ = timm.create_model(
config.backbone , pretrained=_snake_case , features_only=config.features_only , in_chans=config.num_channels , out_indices=_snake_case , **_snake_case , )
# These are used to control the output of the model when called. If output_hidden_states is True, then
# return_layers is modified to include all layers.
UpperCAmelCase_ = self._backbone.return_layers
UpperCAmelCase_ = {layer['''module''']: str(_snake_case) for i, layer in enumerate(self._backbone.feature_info.info)}
super()._init_backbone(_snake_case)
@classmethod
def lowerCamelCase ( cls : Union[str, Any] , _snake_case : int , *_snake_case : List[Any] , **_snake_case : Dict):
"""simple docstring"""
requires_backends(cls , ['''vision''', '''timm'''])
from ...models.timm_backbone import TimmBackboneConfig
UpperCAmelCase_ = kwargs.pop('''config''' , TimmBackboneConfig())
UpperCAmelCase_ = kwargs.pop('''use_timm_backbone''' , _snake_case)
if not use_timm:
raise ValueError('''use_timm_backbone must be True for timm backbones''')
UpperCAmelCase_ = kwargs.pop('''num_channels''' , config.num_channels)
UpperCAmelCase_ = kwargs.pop('''features_only''' , config.features_only)
UpperCAmelCase_ = kwargs.pop('''use_pretrained_backbone''' , config.use_pretrained_backbone)
UpperCAmelCase_ = kwargs.pop('''out_indices''' , config.out_indices)
UpperCAmelCase_ = TimmBackboneConfig(
backbone=_snake_case , num_channels=_snake_case , features_only=_snake_case , use_pretrained_backbone=_snake_case , out_indices=_snake_case , )
return super()._from_config(_snake_case , **_snake_case)
def lowerCamelCase ( self : Union[str, Any] , _snake_case : Dict):
"""simple docstring"""
pass
def lowerCamelCase ( self : Any , _snake_case : Optional[int] , _snake_case : str=None , _snake_case : Optional[int]=None , _snake_case : Optional[int]=None , **_snake_case : Optional[int]):
"""simple docstring"""
UpperCAmelCase_ = return_dict if return_dict is not None else self.config.use_return_dict
UpperCAmelCase_ = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
UpperCAmelCase_ = output_attentions if output_attentions is not None else self.config.output_attentions
if output_attentions:
raise ValueError('''Cannot output attentions for timm backbones at the moment''')
if output_hidden_states:
# We modify the return layers to include all the stages of the backbone
UpperCAmelCase_ = self._all_layers
UpperCAmelCase_ = self._backbone(_snake_case , **_snake_case)
UpperCAmelCase_ = self._return_layers
UpperCAmelCase_ = tuple(hidden_states[i] for i in self.out_indices)
else:
UpperCAmelCase_ = self._backbone(_snake_case , **_snake_case)
UpperCAmelCase_ = None
UpperCAmelCase_ = tuple(_snake_case)
UpperCAmelCase_ = tuple(_snake_case) if hidden_states is not None else None
if not return_dict:
UpperCAmelCase_ = (feature_maps,)
if output_hidden_states:
UpperCAmelCase_ = output + (hidden_states,)
return output
return BackboneOutput(feature_maps=_snake_case , hidden_states=_snake_case , attentions=_snake_case)
| 169
|
def A (number : int ) -> bool:
    """simple docstring"""
    if not isinstance(number , int ):
        msg = F"""Input value of [number={number}] must be an integer"""
        raise TypeError(msg )
    if number < 0:
        return False
    number_square = number * number
while number > 0:
if number % 10 != number_square % 10:
return False
number //= 10
number_square //= 10
return True
if __name__ == "__main__":
import doctest
doctest.testmod()
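# Worked example (added): the function tests whether number**2 ends in the
# digits of number itself (an automorphic number): A(76) compares 5776 with 76
# digit by digit and returns True, while A(7) fails immediately (49 ends in 9)
# and returns False.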
| 169
| 1
|
"""simple docstring"""
import logging
import os
import sys
from pathlib import Path
from unittest.mock import patch
from parameterized import parameterized
from run_eval import run_generate
from run_eval_search import run_search
from transformers.testing_utils import CaptureStdout, TestCasePlus, slow
from utils import ROUGE_KEYS
logging.basicConfig(level=logging.DEBUG)
a : Optional[int] = logging.getLogger()
def _UpperCamelCase ( _A , _A ) -> Any:
"""simple docstring"""
_UpperCAmelCase = """\n""".join(_A )
Path(_A ).open("""w""" ).writelines(_A )
a : Any = '''patrickvonplaten/t5-tiny-random'''
a : Union[str, Any] = '''sshleifer/bart-tiny-random'''
a : Tuple = '''sshleifer/tiny-mbart'''
a : Any = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
logging.disable(logging.CRITICAL) # remove noisy download output from tracebacks
class a_ ( _UpperCAmelCase ):
def _snake_case ( self : List[str] , __UpperCamelCase : str ) ->Union[str, Any]:
'''simple docstring'''
_UpperCAmelCase = Path(self.get_auto_remove_tmp_dir() ) / """utest_input.source"""
_UpperCAmelCase = input_file_name.parent / """utest_output.txt"""
assert not output_file_name.exists()
_UpperCAmelCase = [""" New York (CNN)When Liana Barrientos was 23 years old, she got married in Westchester County."""]
_dump_articles(__UpperCamelCase , __UpperCamelCase )
_UpperCAmelCase = str(Path(self.get_auto_remove_tmp_dir() ) / """scores.json""" )
_UpperCAmelCase = """translation_en_to_de""" if model == T5_TINY else """summarization"""
_UpperCAmelCase = f"""
run_eval_search.py
{model}
{input_file_name}
{output_file_name}
--score_path {score_path}
--task {task}
--num_beams 2
--length_penalty 2.0
""".split()
with patch.object(__UpperCamelCase , """argv""" , __UpperCamelCase ):
run_generate()
assert Path(__UpperCamelCase ).exists()
# os.remove(Path(output_file_name))
def _snake_case ( self : Optional[int] ) ->Dict:
'''simple docstring'''
self.run_eval_tester(__UpperCamelCase )
@parameterized.expand([BART_TINY, MBART_TINY] )
@slow
def _snake_case ( self : List[Any] , __UpperCamelCase : List[str] ) ->Optional[int]:
'''simple docstring'''
self.run_eval_tester(__UpperCamelCase )
@parameterized.expand([T5_TINY, MBART_TINY] )
@slow
def _snake_case ( self : List[str] , __UpperCamelCase : int ) ->Any:
'''simple docstring'''
_UpperCAmelCase = Path(self.get_auto_remove_tmp_dir() ) / """utest_input.source"""
_UpperCAmelCase = input_file_name.parent / """utest_output.txt"""
assert not output_file_name.exists()
_UpperCAmelCase = {
"""en""": ["""Machine learning is great, isn't it?""", """I like to eat bananas""", """Tomorrow is another great day!"""],
"""de""": [
"""Maschinelles Lernen ist großartig, oder?""",
"""Ich esse gerne Bananen""",
"""Morgen ist wieder ein toller Tag!""",
],
}
_UpperCAmelCase = Path(self.get_auto_remove_tmp_dir() )
_UpperCAmelCase = str(tmp_dir / """scores.json""" )
_UpperCAmelCase = str(tmp_dir / """val.target""" )
_dump_articles(__UpperCamelCase , text["""en"""] )
_dump_articles(__UpperCamelCase , text["""de"""] )
_UpperCAmelCase = """translation_en_to_de""" if model == T5_TINY else """summarization"""
_UpperCAmelCase = f"""
run_eval_search.py
{model}
{str(__UpperCamelCase )}
{str(__UpperCamelCase )}
--score_path {score_path}
--reference_path {reference_path}
--task {task}
""".split()
testargs.extend(["""--search""", """num_beams=1:2 length_penalty=0.9:1.0"""] )
with patch.object(__UpperCamelCase , """argv""" , __UpperCamelCase ):
with CaptureStdout() as cs:
run_search()
_UpperCAmelCase = [""" num_beams | length_penalty""", model, """Best score args"""]
_UpperCAmelCase = ["""Info"""]
if "translation" in task:
expected_strings.append("""bleu""" )
else:
expected_strings.extend(__UpperCamelCase )
for w in expected_strings:
assert w in cs.out
for w in un_expected_strings:
assert w not in cs.out
assert Path(__UpperCamelCase ).exists()
os.remove(Path(__UpperCamelCase ) )
| 555
|
"""simple docstring"""
import warnings
from functools import wraps
from typing import Callable
def _UpperCamelCase ( fn ) -> Callable:
    """simple docstring"""
    @wraps(fn )
    def _inner_fn(*args , **kwargs ):
        warnings.warn(
            (F"""'{fn.__name__}' is experimental and might be subject to breaking changes in the future.""") , UserWarning , )
        return fn(*args , **kwargs )
    return _inner_fn
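# Usage sketch (added; the decorator is obfuscated to `_UpperCamelCase` here,
# upstream it is exported as `experimental`):
#
#     @_UpperCamelCase
#     def ranking():
#         return 42
#
#     ranking()  # warns: "'ranking' is experimental and might be subject to ..."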
| 555
| 1
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
if is_sentencepiece_available():
from ..ta.tokenization_ta import TaTokenizer
else:
from ...utils.dummy_sentencepiece_objects import TaTokenizer
__a : List[Any] = TaTokenizer
if is_tokenizers_available():
from ..ta.tokenization_ta_fast import TaTokenizerFast
else:
from ...utils.dummy_tokenizers_objects import TaTokenizerFast
__a : Any = TaTokenizerFast
__a : Any = {"""configuration_mt5""": ["""MT5Config""", """MT5OnnxConfig"""]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a : Any = [
"""MT5EncoderModel""",
"""MT5ForConditionalGeneration""",
"""MT5ForQuestionAnswering""",
"""MT5Model""",
"""MT5PreTrainedModel""",
"""MT5Stack""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a : Union[str, Any] = ["""TFMT5EncoderModel""", """TFMT5ForConditionalGeneration""", """TFMT5Model"""]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a : Any = ["""FlaxMT5EncoderModel""", """FlaxMT5ForConditionalGeneration""", """FlaxMT5Model"""]
if TYPE_CHECKING:
from .configuration_mta import MTaConfig, MTaOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mta import (
MTaEncoderModel,
MTaForConditionalGeneration,
MTaForQuestionAnswering,
MTaModel,
MTaPreTrainedModel,
MTaStack,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mta import TFMTaEncoderModel, TFMTaForConditionalGeneration, TFMTaModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_mta import FlaxMTaEncoderModel, FlaxMTaForConditionalGeneration, FlaxMTaModel
else:
import sys
__a : int = _LazyModule(
__name__,
globals()["""__file__"""],
_import_structure,
extra_objects={"""MT5Tokenizer""": MTaTokenizer, """MT5TokenizerFast""": MTaTokenizerFast},
module_spec=__spec__,
)
| 522
|
import argparse
import torch
from torch import nn
from transformers import MaMaaaConfig, MaMaaaForConditionalGeneration
def UpperCAmelCase ( lowercase ):
"""simple docstring"""
__lowercase = [
'''encoder.version''',
'''decoder.version''',
'''model.encoder.version''',
'''model.decoder.version''',
'''decoder.output_projection.weight''',
'''_float_tensor''',
'''encoder.embed_positions._float_tensor''',
'''decoder.embed_positions._float_tensor''',
]
for k in ignore_keys:
state_dict.pop(lowercase , lowercase )
def UpperCAmelCase ( lowercase ):
"""simple docstring"""
__lowercase , __lowercase = emb.weight.shape
__lowercase = nn.Linear(lowercase , lowercase , bias=lowercase )
__lowercase = emb.weight.data
return lin_layer
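# Shape note (added): an nn.Embedding weight has shape (vocab_size, emb_size)
# while nn.Linear stores its weight as (out_features, in_features), so
# nn.Linear(emb_size, vocab_size, bias=False) can take emb.weight.data verbatim
# and serve as the tied output projection, with no transpose needed.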
def UpperCAmelCase ( lowercase ):
"""simple docstring"""
__lowercase = torch.load(lowercase , map_location='''cpu''' )
__lowercase = mam_aaa['''args'''] or mam_aaa['''cfg''']['''model''']
__lowercase = mam_aaa['''model''']
remove_ignore_keys_(lowercase )
__lowercase = state_dict['''encoder.embed_tokens.weight'''].shape[0]
__lowercase = MaMaaaConfig(
vocab_size=lowercase , max_position_embeddings=1024 , encoder_layers=args.encoder_layers , decoder_layers=args.decoder_layers , encoder_attention_heads=args.encoder_attention_heads , decoder_attention_heads=args.decoder_attention_heads , encoder_ffn_dim=args.encoder_ffn_embed_dim , decoder_ffn_dim=args.decoder_ffn_embed_dim , d_model=args.encoder_embed_dim , encoder_layerdrop=args.encoder_layerdrop , decoder_layerdrop=args.decoder_layerdrop , dropout=args.dropout , attention_dropout=args.attention_dropout , activation_dropout=args.activation_dropout , activation_function='''relu''' , )
__lowercase = state_dict['''decoder.embed_tokens.weight''']
__lowercase = MaMaaaForConditionalGeneration(lowercase )
model.model.load_state_dict(lowercase , strict=lowercase )
__lowercase = make_linear_from_emb(model.model.shared )
return model
if __name__ == "__main__":
__a : List[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument("""fairseq_path""", type=str, help="""path to a model.pt on local filesystem.""")
parser.add_argument("""pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
__a : Union[str, Any] = parser.parse_args()
    __a : List[str] = convert_fairseq_mamaaa_checkpoint_from_disk(args.fairseq_path)
model.save_pretrained(args.pytorch_dump_folder_path)
| 522
| 1
|
'''simple docstring'''
# Copyright 2022 The HuggingFace Team and The OpenBMB Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_lowercase = {
"""configuration_cpmant""": ["""CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """CpmAntConfig"""],
"""tokenization_cpmant""": ["""CpmAntTokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase = [
"""CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""CpmAntForCausalLM""",
"""CpmAntModel""",
"""CpmAntPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_cpmant import CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP, CpmAntConfig
from .tokenization_cpmant import CpmAntTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_cpmant import (
CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST,
CpmAntForCausalLM,
CpmAntModel,
CpmAntPreTrainedModel,
)
else:
import sys
_lowercase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 356
|
'''simple docstring'''
import argparse
import requests
import torch
from PIL import Image
from transformers import CLIPProcessor, GroupViTConfig, GroupViTModel
def lowerCamelCase__ ( a ):
# vision encoder
if "img_encoder.pos_embed" in name:
__snake_case = name.replace('img_encoder.pos_embed' , 'vision_model.embeddings.position_embeddings' )
if "img_encoder.patch_embed.proj" in name:
__snake_case = name.replace('img_encoder.patch_embed.proj' , 'vision_model.embeddings.patch_embeddings.projection' )
if "img_encoder.patch_embed.norm" in name:
__snake_case = name.replace('img_encoder.patch_embed.norm' , 'vision_model.embeddings.layernorm' )
if "img_encoder.layers" in name:
__snake_case = name.replace('img_encoder.layers' , 'vision_model.encoder.stages' )
if "blocks" in name and "res" not in name:
__snake_case = name.replace('blocks' , 'layers' )
if "attn" in name and "pre_assign" not in name:
__snake_case = name.replace('attn' , 'self_attn' )
if "proj" in name and "self_attn" in name and "text" not in name:
__snake_case = name.replace('proj' , 'out_proj' )
if "pre_assign_attn.attn.proj" in name:
__snake_case = name.replace('pre_assign_attn.attn.proj' , 'pre_assign_attn.attn.out_proj' )
if "norm1" in name:
__snake_case = name.replace('norm1' , 'layer_norm1' )
if "norm2" in name and "pre_assign" not in name:
__snake_case = name.replace('norm2' , 'layer_norm2' )
if "img_encoder.norm" in name:
__snake_case = name.replace('img_encoder.norm' , 'vision_model.layernorm' )
# text encoder
if "text_encoder.token_embedding" in name:
__snake_case = name.replace('text_encoder.token_embedding' , 'text_model.embeddings.token_embedding' )
if "text_encoder.positional_embedding" in name:
__snake_case = name.replace('text_encoder.positional_embedding' , 'text_model.embeddings.position_embedding.weight' )
if "text_encoder.transformer.resblocks." in name:
__snake_case = name.replace('text_encoder.transformer.resblocks.' , 'text_model.encoder.layers.' )
if "ln_1" in name:
__snake_case = name.replace('ln_1' , 'layer_norm1' )
if "ln_2" in name:
__snake_case = name.replace('ln_2' , 'layer_norm2' )
if "c_fc" in name:
__snake_case = name.replace('c_fc' , 'fc1' )
if "c_proj" in name:
__snake_case = name.replace('c_proj' , 'fc2' )
if "text_encoder" in name:
__snake_case = name.replace('text_encoder' , 'text_model' )
if "ln_final" in name:
__snake_case = name.replace('ln_final' , 'final_layer_norm' )
# projection layers
if "img_projector.linear_hidden." in name:
__snake_case = name.replace('img_projector.linear_hidden.' , 'visual_projection.' )
if "img_projector.linear_out." in name:
__snake_case = name.replace('img_projector.linear_out.' , 'visual_projection.3.' )
if "text_projector.linear_hidden" in name:
__snake_case = name.replace('text_projector.linear_hidden' , 'text_projection' )
if "text_projector.linear_out" in name:
__snake_case = name.replace('text_projector.linear_out' , 'text_projection.3' )
return name
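# Worked example (added): rename_key (obfuscated above as `lowerCamelCase__`)
# maps 'img_encoder.patch_embed.proj.weight' to
# 'vision_model.embeddings.patch_embeddings.projection.weight'; the generic
# 'proj' rule does not fire afterwards because it requires 'self_attn' in the name.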
def lowerCamelCase__ ( a , a ):
for key in orig_state_dict.copy().keys():
__snake_case = orig_state_dict.pop(a )
if "qkv" in key:
# weights and biases of the key, value and query projections of vision encoder's attention layers require special treatment:
# we need to split them up into separate matrices/vectors
__snake_case = key.split('.' )
__snake_case , __snake_case = int(key_split[2] ), int(key_split[4] )
__snake_case = config.vision_config.hidden_size
if "weight" in key:
__snake_case = val[:dim, :]
__snake_case = val[dim : dim * 2, :]
__snake_case = val[-dim:, :]
else:
__snake_case = val[:dim]
__snake_case = val[dim : dim * 2]
__snake_case = val[-dim:]
elif "in_proj" in key:
# weights and biases of the key, value and query projections of text encoder's attention layers require special treatment:
# we need to split them up into separate matrices/vectors
__snake_case = key.split('.' )
__snake_case = int(key_split[3] )
__snake_case = config.text_config.hidden_size
if "weight" in key:
__snake_case = val[:dim, :]
__snake_case = val[
dim : dim * 2, :
]
__snake_case = val[-dim:, :]
else:
__snake_case = val[:dim]
__snake_case = val[dim : dim * 2]
__snake_case = val[-dim:]
else:
__snake_case = rename_key(a )
# squeeze if necessary
if (
"text_projection.0" in new_name
or "text_projection.3" in new_name
or "visual_projection.0" in new_name
or "visual_projection.3" in new_name
):
__snake_case = val.squeeze_()
else:
__snake_case = val
return orig_state_dict
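# Splitting sketch (added; illustrative): a fused qkv weight of shape
# (3 * dim, dim) is carved into three (dim, dim) blocks exactly as above:
#
#     import torch
#     dim = 4
#     qkv = torch.randn(3 * dim, dim)
#     q, k, v = qkv[:dim, :], qkv[dim : dim * 2, :], qkv[-dim:, :]
#     assert q.shape == k.shape == v.shape == (dim, dim)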
def lowerCamelCase__ ( ):
__snake_case = 'http://images.cocodataset.org/val2017/000000039769.jpg'
__snake_case = Image.open(requests.get(a , stream=a ).raw )
return im
@torch.no_grad()
def lowerCamelCase__ ( a , a , a="groupvit-gcc-yfcc" , a=False ):
__snake_case = GroupViTConfig()
__snake_case = GroupViTModel(a ).eval()
__snake_case = torch.load(a , map_location='cpu' )['model']
__snake_case = convert_state_dict(a , a )
__snake_case , __snake_case = model.load_state_dict(a , strict=a )
assert missing_keys == ["text_model.embeddings.position_ids"]
assert (unexpected_keys == ["multi_label_logit_scale"]) or (len(a ) == 0)
# verify result
__snake_case = CLIPProcessor.from_pretrained('openai/clip-vit-base-patch32' )
__snake_case = prepare_img()
__snake_case = processor(text=['a photo of a cat', 'a photo of a dog'] , images=a , padding=a , return_tensors='pt' )
with torch.no_grad():
__snake_case = model(**a )
if model_name == "groupvit-gcc-yfcc":
__snake_case = torch.tensor([[13.3_523, 6.3_629]] )
elif model_name == "groupvit-gcc-redcaps":
__snake_case = torch.tensor([[16.1_873, 8.6_230]] )
else:
raise ValueError(f'Model name {model_name} not supported.' )
assert torch.allclose(outputs.logits_per_image , a , atol=1E-3 )
processor.save_pretrained(a )
model.save_pretrained(a )
print('Successfully saved processor and model to' , a )
if push_to_hub:
print('Pushing to the hub...' )
processor.push_to_hub(a , organization='nielsr' )
model.push_to_hub(a , organization='nielsr' )
if __name__ == "__main__":
_lowercase = argparse.ArgumentParser()
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to dump the processor and PyTorch model."""
)
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to GroupViT checkpoint""")
parser.add_argument(
"""--model_name""",
default="""groupvit-gccy-fcc""",
type=str,
help="""Name of the model. Expecting either 'groupvit-gcc-yfcc' or 'groupvit-gcc-redcaps'""",
)
parser.add_argument(
"""--push_to_hub""",
action="""store_true""",
help="""Whether or not to push the converted model and processor to the 🤗 hub using the provided `model_name`.""",
)
_lowercase = parser.parse_args()
convert_groupvit_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 356
| 1
|
import argparse
import collections
import numpy as np
import torch
from flax import traverse_util
from tax import checkpoints
from transformers import MTaConfig, UMTaEncoderModel, UMTaForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
def __lowerCAmelCase ( __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Optional[Any] , __lowerCamelCase : Tuple ) -> Any:
return params[f"""{prefix}/{prefix}/relpos_bias/rel_embedding"""][:, i, :]
def __lowerCAmelCase ( __lowerCamelCase : Optional[int] , __lowerCamelCase : Optional[Any] , __lowerCamelCase : List[str] , __lowerCamelCase : int="attention" ) -> Dict:
__lowerCAmelCase =__lowerCAmelCase =np.ascontiguousarray(params[f"""{prefix}/{prefix}/{layer_name}/key/kernel"""][:, i, :, :] )
__lowerCAmelCase =k_tmp.reshape(k_tmp.shape[0] , k_tmp.shape[1] * k_tmp.shape[2] )
__lowerCAmelCase =np.ascontiguousarray(params[f"""{prefix}/{prefix}/{layer_name}/out/kernel"""][:, i, :, :] )
__lowerCAmelCase =o_tmp.reshape(o_tmp.shape[0] * o_tmp.shape[1] , o_tmp.shape[2] )
__lowerCAmelCase =np.ascontiguousarray(params[f"""{prefix}/{prefix}/{layer_name}/query/kernel"""][:, i, :, :] )
__lowerCAmelCase =q_tmp.reshape(q_tmp.shape[0] , q_tmp.shape[1] * q_tmp.shape[2] )
__lowerCAmelCase =np.ascontiguousarray(params[f"""{prefix}/{prefix}/{layer_name}/value/kernel"""][:, i, :, :] )
__lowerCAmelCase =v_tmp.reshape(v_tmp.shape[0] , v_tmp.shape[1] * v_tmp.shape[2] )
return k, o, q, v
def __lowerCAmelCase ( __lowerCamelCase : Any , __lowerCamelCase : int , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Tuple=False ) -> List[Any]:
if split_mlp_wi:
__lowerCAmelCase =params[f"""{prefix}/{prefix}/mlp/wi_0/kernel"""][:, i, :]
__lowerCAmelCase =params[f"""{prefix}/{prefix}/mlp/wi_1/kernel"""][:, i, :]
__lowerCAmelCase =(wi_a, wi_a)
else:
__lowerCAmelCase =params[f"""{prefix}/{prefix}/mlp/wi/kernel"""][:, i, :]
__lowerCAmelCase =params[f"""{prefix}/{prefix}/mlp/wo/kernel"""][:, i, :]
return wi, wo
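# Note (added): T5 v1.1 checkpoints use a gated activation, so the MLP input
# projection is stored as two kernels, wi_0 (gate) and wi_1 (value), and this
# lookup returns them as a tuple; v1.0 checkpoints store a single wi kernel.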
def __lowerCAmelCase ( __lowerCamelCase : List[str] , __lowerCamelCase : str , __lowerCamelCase : Optional[Any] , __lowerCamelCase : str ) -> List[Any]:
return params[f"""{prefix}/{prefix}/{layer_name}/scale"""][:, i]
def __lowerCAmelCase ( __lowerCamelCase : dict , *, __lowerCamelCase : int , __lowerCamelCase : bool , __lowerCamelCase : bool = False ) -> Optional[Any]:
__lowerCAmelCase =traverse_util.flatten_dict(variables["""target"""] )
__lowerCAmelCase ={"""/""".join(__lowerCamelCase ): v for k, v in old.items()}
# v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi
__lowerCAmelCase ="""encoder/encoder/mlp/wi_0/kernel""" in old
print("""Split MLP:""" , __lowerCamelCase )
__lowerCAmelCase =collections.OrderedDict()
# Shared embeddings.
__lowerCAmelCase =old["""token_embedder/embedding"""]
# Encoder.
for i in range(__lowerCamelCase ):
# Block i, layer 0 (Self Attention).
__lowerCAmelCase =tax_layer_norm_lookup(__lowerCamelCase , __lowerCamelCase , """encoder""" , """pre_attention_layer_norm""" )
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase =tax_attention_lookup(__lowerCamelCase , __lowerCamelCase , """encoder""" , """attention""" )
__lowerCAmelCase =layer_norm
__lowerCAmelCase =k.T
__lowerCAmelCase =o.T
__lowerCAmelCase =q.T
__lowerCAmelCase =v.T
# Block i, layer 1 (MLP).
__lowerCAmelCase =tax_layer_norm_lookup(__lowerCamelCase , __lowerCamelCase , """encoder""" , """pre_mlp_layer_norm""" )
__lowerCAmelCase , __lowerCAmelCase =tax_mlp_lookup(__lowerCamelCase , __lowerCamelCase , """encoder""" , __lowerCamelCase )
__lowerCAmelCase =layer_norm
if split_mlp_wi:
__lowerCAmelCase =wi[0].T
__lowerCAmelCase =wi[1].T
else:
__lowerCAmelCase =wi.T
__lowerCAmelCase =wo.T
if scalable_attention:
# convert the rel_embedding of each layer
__lowerCAmelCase =tax_relpos_bias_lookup(
__lowerCamelCase , __lowerCamelCase , """encoder""" ).T
__lowerCAmelCase =old["""encoder/encoder_norm/scale"""]
if not scalable_attention:
__lowerCAmelCase =tax_relpos_bias_lookup(
__lowerCamelCase , 0 , """encoder""" ).T
__lowerCAmelCase =tax_relpos_bias_lookup(
__lowerCamelCase , 0 , """decoder""" ).T
if not is_encoder_only:
# Decoder.
for i in range(__lowerCamelCase ):
# Block i, layer 0 (Self Attention).
__lowerCAmelCase =tax_layer_norm_lookup(__lowerCamelCase , __lowerCamelCase , """decoder""" , """pre_self_attention_layer_norm""" )
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase =tax_attention_lookup(__lowerCamelCase , __lowerCamelCase , """decoder""" , """self_attention""" )
__lowerCAmelCase =layer_norm
__lowerCAmelCase =k.T
__lowerCAmelCase =o.T
__lowerCAmelCase =q.T
__lowerCAmelCase =v.T
# Block i, layer 1 (Cross Attention).
__lowerCAmelCase =tax_layer_norm_lookup(__lowerCamelCase , __lowerCamelCase , """decoder""" , """pre_cross_attention_layer_norm""" )
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase =tax_attention_lookup(__lowerCamelCase , __lowerCamelCase , """decoder""" , """encoder_decoder_attention""" )
__lowerCAmelCase =layer_norm
__lowerCAmelCase =k.T
__lowerCAmelCase =o.T
__lowerCAmelCase =q.T
__lowerCAmelCase =v.T
# Block i, layer 2 (MLP).
__lowerCAmelCase =tax_layer_norm_lookup(__lowerCamelCase , __lowerCamelCase , """decoder""" , """pre_mlp_layer_norm""" )
__lowerCAmelCase , __lowerCAmelCase =tax_mlp_lookup(__lowerCamelCase , __lowerCamelCase , """decoder""" , __lowerCamelCase )
__lowerCAmelCase =layer_norm
if split_mlp_wi:
__lowerCAmelCase =wi[0].T
__lowerCAmelCase =wi[1].T
else:
__lowerCAmelCase =wi.T
__lowerCAmelCase =wo.T
if scalable_attention:
# convert the rel_embedding of each layer
__lowerCAmelCase =tax_relpos_bias_lookup(__lowerCamelCase , __lowerCamelCase , """decoder""" ).T
__lowerCAmelCase =old["""decoder/decoder_norm/scale"""]
# LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead)
if "decoder/logits_dense/kernel" in old:
__lowerCAmelCase =old["""decoder/logits_dense/kernel"""].T
return new
def __lowerCAmelCase ( __lowerCamelCase : Union[str, Any] , __lowerCamelCase : bool ) -> str:
__lowerCAmelCase =collections.OrderedDict([(k, torch.from_numpy(v.copy() )) for (k, v) in converted_params.items()] )
# Add what is missing.
if "encoder.embed_tokens.weight" not in state_dict:
__lowerCAmelCase =state_dict["""shared.weight"""]
if not is_encoder_only:
if "decoder.embed_tokens.weight" not in state_dict:
__lowerCAmelCase =state_dict["""shared.weight"""]
if "lm_head.weight" not in state_dict: # For old 1.0 models.
print("""Using shared word embeddings as lm_head.""" )
__lowerCAmelCase =state_dict["""shared.weight"""]
return state_dict
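# Note (added): v1.0-style checkpoints tie the LM head to the shared token
# embeddings, which is why shared.weight is copied into lm_head.weight above;
# v1.1-style checkpoints instead ship an explicit decoder/logits_dense kernel
# handled in convert_tax_to_pytorch.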
def __lowerCAmelCase ( __lowerCamelCase : List[str] , __lowerCamelCase : int , __lowerCamelCase : List[Any] , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Tuple ) -> Any:
__lowerCAmelCase =checkpoints.load_tax_checkpoint(__lowerCamelCase )
__lowerCAmelCase =convert_tax_to_pytorch(
__lowerCamelCase , num_layers=config.num_layers , is_encoder_only=__lowerCamelCase , scalable_attention=__lowerCamelCase )
__lowerCAmelCase =make_state_dict(__lowerCamelCase , __lowerCamelCase )
model.load_state_dict(__lowerCamelCase , strict=__lowerCamelCase )
def __lowerCAmelCase ( __lowerCamelCase : List[str] , __lowerCamelCase : Optional[Any] , __lowerCamelCase : Optional[int] , __lowerCamelCase : bool = False , __lowerCamelCase : bool = False , ) -> List[str]:
__lowerCAmelCase =MTaConfig.from_json_file(__lowerCamelCase )
print(f"""Building PyTorch model from configuration: {config}""" )
# Non-v1.1 checkpoints could also use T5Model, but this works for all.
# The v1.0 checkpoints will simply have an LM head that is the word embeddings.
if is_encoder_only:
__lowerCAmelCase =UMTaEncoderModel(__lowerCamelCase )
else:
__lowerCAmelCase =UMTaForConditionalGeneration(__lowerCamelCase )
# Load weights from tf checkpoint
load_tax_weights_in_ta(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
# Save pytorch-model
print(f"""Save PyTorch model to {pytorch_dump_path}""" )
model.save_pretrained(__lowerCamelCase )
# Verify that we can load the checkpoint.
model.from_pretrained(__lowerCamelCase )
print("""Done""" )
if __name__ == "__main__":
lowercase_ = argparse.ArgumentParser(description='''Converts a native T5X checkpoint into a PyTorch checkpoint.''')
# Required parameters
parser.add_argument(
'''--t5x_checkpoint_path''', default=None, type=str, required=True, help='''Path to the T5X checkpoint.'''
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help='''The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.''',
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--is_encoder_only''', action='''store_true''', help='''Check if the model is encoder-decoder model''', default=False
)
parser.add_argument(
'''--scalable_attention''',
action='''store_true''',
help='''Whether the model uses scaled attention (umt5 model)''',
default=False,
)
lowercase_ = parser.parse_args()
convert_tax_checkpoint_to_pytorch(
args.tax_checkpoint_path,
args.config_file,
args.pytorch_dump_path,
args.is_encoder_only,
args.scalable_attention,
)
| 456
|
import os
from typing import Dict, List, Tuple, TypeVar, Union
lowercase_ = TypeVar('''T''')
lowercase_ = Union[List[T], Tuple[T, ...]]
lowercase_ = Union[T, List[T], Dict[str, T]]
lowercase_ = Union[str, bytes, os.PathLike]
| 456
| 1
|
'''simple docstring'''
import inspect
import unittest
from transformers import ViTMSNConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTMSNForImageClassification, ViTMSNModel
from transformers.models.vit_msn.modeling_vit_msn import VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class lowerCAmelCase_ :
'''simple docstring'''
def __init__( self : Optional[int] , _UpperCAmelCase : List[Any] , _UpperCAmelCase : Optional[int]=13 , _UpperCAmelCase : Optional[Any]=30 , _UpperCAmelCase : List[str]=2 , _UpperCAmelCase : Tuple=3 , _UpperCAmelCase : List[str]=True , _UpperCAmelCase : Union[str, Any]=True , _UpperCAmelCase : List[Any]=32 , _UpperCAmelCase : str=5 , _UpperCAmelCase : int=4 , _UpperCAmelCase : Optional[int]=37 , _UpperCAmelCase : Optional[Any]="gelu" , _UpperCAmelCase : List[str]=0.1 , _UpperCAmelCase : str=0.1 , _UpperCAmelCase : str=10 , _UpperCAmelCase : Dict=0.02 , _UpperCAmelCase : int=None , ):
"""simple docstring"""
UpperCAmelCase__ = parent
UpperCAmelCase__ = batch_size
UpperCAmelCase__ = image_size
UpperCAmelCase__ = patch_size
UpperCAmelCase__ = num_channels
UpperCAmelCase__ = is_training
UpperCAmelCase__ = use_labels
UpperCAmelCase__ = hidden_size
UpperCAmelCase__ = num_hidden_layers
UpperCAmelCase__ = num_attention_heads
UpperCAmelCase__ = intermediate_size
UpperCAmelCase__ = hidden_act
UpperCAmelCase__ = hidden_dropout_prob
UpperCAmelCase__ = attention_probs_dropout_prob
UpperCAmelCase__ = type_sequence_label_size
UpperCAmelCase__ = initializer_range
UpperCAmelCase__ = scope
# in ViT MSN, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
UpperCAmelCase__ = (image_size // patch_size) ** 2
UpperCAmelCase__ = num_patches + 1
def SCREAMING_SNAKE_CASE__ ( self : Any ):
"""simple docstring"""
UpperCAmelCase__ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCAmelCase__ = None
if self.use_labels:
UpperCAmelCase__ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCAmelCase__ = self.get_config()
return config, pixel_values, labels
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ):
"""simple docstring"""
return ViTMSNConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , initializer_range=self.initializer_range , )
def SCREAMING_SNAKE_CASE__ ( self : List[str] , _UpperCAmelCase : Tuple , _UpperCAmelCase : Tuple , _UpperCAmelCase : Union[str, Any] ):
"""simple docstring"""
UpperCAmelCase__ = ViTMSNModel(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
UpperCAmelCase__ = model(_UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] , _UpperCAmelCase : int , _UpperCAmelCase : Any , _UpperCAmelCase : Optional[int] ):
"""simple docstring"""
UpperCAmelCase__ = self.type_sequence_label_size
UpperCAmelCase__ = ViTMSNForImageClassification(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
UpperCAmelCase__ = model(_UpperCAmelCase , labels=_UpperCAmelCase )
print("""Pixel and labels shape: {pixel_values.shape}, {labels.shape}""" )
print("""Labels: {labels}""" )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
UpperCAmelCase__ = 1
UpperCAmelCase__ = ViTMSNForImageClassification(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
UpperCAmelCase__ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
UpperCAmelCase__ = model(_UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ):
"""simple docstring"""
UpperCAmelCase__ = self.prepare_config_and_inputs()
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ = config_and_inputs
UpperCAmelCase__ = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class lowerCAmelCase_ ( lowerCamelCase_ , lowerCamelCase_ , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase_ : Optional[Any] = (ViTMSNModel, ViTMSNForImageClassification) if is_torch_available() else ()
lowerCAmelCase_ : Any = (
{"""feature-extraction""": ViTMSNModel, """image-classification""": ViTMSNForImageClassification}
if is_torch_available()
else {}
)
lowerCAmelCase_ : int = False
lowerCAmelCase_ : Union[str, Any] = False
lowerCAmelCase_ : int = False
lowerCAmelCase_ : List[Any] = False
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ):
"""simple docstring"""
UpperCAmelCase__ = ViTMSNModelTester(self )
UpperCAmelCase__ = ConfigTester(self , config_class=_UpperCAmelCase , has_text_modality=_UpperCAmelCase , hidden_size=37 )
def SCREAMING_SNAKE_CASE__ ( self : str ):
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason="""ViTMSN does not use inputs_embeds""" )
def SCREAMING_SNAKE_CASE__ ( self : Dict ):
"""simple docstring"""
pass
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ):
"""simple docstring"""
UpperCAmelCase__ , UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase__ = model_class(_UpperCAmelCase )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
UpperCAmelCase__ = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_UpperCAmelCase , nn.Linear ) )
    def test_forward_signature( self : Union[str, Any] ):
        """simple docstring"""
        config , _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["""pixel_values"""]
            self.assertListEqual(arg_names[:1] , expected_arg_names )
    def test_model( self : Optional[Any] ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
    def test_for_image_classification( self : List[str] ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs )
@slow
    def test_model_from_pretrained( self : int ):
        """simple docstring"""
        for model_name in VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ViTMSNModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
def prepare_img( ):
    '''simple docstring'''
    image = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
    return image
@require_torch
@require_vision
class lowerCAmelCase_ ( unittest.TestCase ):
'''simple docstring'''
    @cached_property
    def default_image_processor( self : int ):
        """simple docstring"""
        return ViTImageProcessor.from_pretrained("""facebook/vit-msn-small""" ) if is_vision_available() else None
@slow
    def test_inference_image_classification_head( self : Optional[int] ):
        """simple docstring"""
        torch.manual_seed(2 )
        model = ViTMSNForImageClassification.from_pretrained("""facebook/vit-msn-small""" ).to(torch_device )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors="""pt""" ).to(torch_device )
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs )
        # verify the logits
        expected_shape = torch.Size((1, 10_00) )
        self.assertEqual(outputs.logits.shape , expected_shape )
        expected_slice = torch.tensor([-0.0803, -0.4454, -0.2375] ).to(torch_device )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , expected_slice , atol=1E-4 ) )
| 603
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
'configuration_mobilenet_v2': [
'MOBILENET_V2_PRETRAINED_CONFIG_ARCHIVE_MAP',
'MobileNetV2Config',
'MobileNetV2OnnxConfig',
],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['feature_extraction_mobilenet_v2'] = ['MobileNetV2FeatureExtractor']
    _import_structure['image_processing_mobilenet_v2'] = ['MobileNetV2ImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_mobilenet_v2'] = [
'MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST',
'MobileNetV2ForImageClassification',
'MobileNetV2ForSemanticSegmentation',
'MobileNetV2Model',
'MobileNetV2PreTrainedModel',
'load_tf_weights_in_mobilenet_v2',
]
if TYPE_CHECKING:
    from .configuration_mobilenet_v2 import (
        MOBILENET_V2_PRETRAINED_CONFIG_ARCHIVE_MAP,
        MobileNetV2Config,
        MobileNetV2OnnxConfig,
    )
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .feature_extraction_mobilenet_v2 import MobileNetV2FeatureExtractor
        from .image_processing_mobilenet_v2 import MobileNetV2ImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_mobilenet_v2 import (
            MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST,
            MobileNetV2ForImageClassification,
            MobileNetV2ForSemanticSegmentation,
            MobileNetV2Model,
            MobileNetV2PreTrainedModel,
            load_tf_weights_in_mobilenet_v2,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
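    # Note (sketch of the standard transformers idiom): assigning the _LazyModule to
    # sys.modules[__name__] defers the heavy torch/vision submodule imports until an
    # attribute listed in _import_structure is first accessed.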
| 603
| 1
|
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_squeezebert import SqueezeBertTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"squeezebert/squeezebert-uncased": (
"https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/vocab.txt"
),
"squeezebert/squeezebert-mnli": "https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/vocab.txt",
"squeezebert/squeezebert-mnli-headless": (
"https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"squeezebert/squeezebert-uncased": (
"https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/tokenizer.json"
),
"squeezebert/squeezebert-mnli": (
"https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/tokenizer.json"
),
"squeezebert/squeezebert-mnli-headless": (
"https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/tokenizer.json"
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"squeezebert/squeezebert-uncased": 512,
"squeezebert/squeezebert-mnli": 512,
"squeezebert/squeezebert-mnli-headless": 512,
}
PRETRAINED_INIT_CONFIGURATION = {
"squeezebert/squeezebert-uncased": {"do_lower_case": True},
"squeezebert/squeezebert-mnli": {"do_lower_case": True},
"squeezebert/squeezebert-mnli-headless": {"do_lower_case": True},
}
class __A ( PreTrainedTokenizerFast ):
'''simple docstring'''
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = SqueezeBertTokenizer
    def __init__( self , vocab_file=None , tokenizer_file=None , do_lower_case=True , unk_token="[UNK]" , sep_token="[SEP]" , pad_token="[PAD]" , cls_token="[CLS]" , mask_token="[MASK]" , tokenize_chinese_chars=True , strip_accents=None , **kwargs , ):
        super().__init__(
            vocab_file , tokenizer_file=tokenizer_file , do_lower_case=do_lower_case , unk_token=unk_token , sep_token=sep_token , pad_token=pad_token , cls_token=cls_token , mask_token=mask_token , tokenize_chinese_chars=tokenize_chinese_chars , strip_accents=strip_accents , **kwargs , )
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
        if (
            normalizer_state.get("lowercase" , do_lower_case ) != do_lower_case
            or normalizer_state.get("strip_accents" , strip_accents ) != strip_accents
            or normalizer_state.get("handle_chinese_chars" , tokenize_chinese_chars ) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers , normalizer_state.pop("type" ) )
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state )
        self.do_lower_case = do_lower_case
    def SCREAMING_SNAKE_CASE__ ( self , token_ids_0 , token_ids_1=None ):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output
    def SCREAMING_SNAKE_CASE__ ( self , token_ids_0 , token_ids_1 = None ):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep ) * [0] + len(token_ids_1 + sep ) * [1]
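    # Example (illustrative): for a sentence pair (A, B) this yields segment id 0 for
    # "[CLS] A [SEP]" and segment id 1 for "B [SEP]", the standard BERT two-segment scheme.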
    def SCREAMING_SNAKE_CASE__ ( self , save_directory , filename_prefix = None ):
        files = self._tokenizer.model.save(save_directory , name=filename_prefix )
        return tuple(files )
| 587
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
"configuration_nezha": ["NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP", "NezhaConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_nezha"] = [
"NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST",
"NezhaForNextSentencePrediction",
"NezhaForMaskedLM",
"NezhaForPreTraining",
"NezhaForMultipleChoice",
"NezhaForQuestionAnswering",
"NezhaForSequenceClassification",
"NezhaForTokenClassification",
"NezhaModel",
"NezhaPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_nezha import NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP, NezhaConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_nezha import (
NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST,
NezhaForMaskedLM,
NezhaForMultipleChoice,
NezhaForNextSentencePrediction,
NezhaForPreTraining,
NezhaForQuestionAnswering,
NezhaForSequenceClassification,
NezhaForTokenClassification,
NezhaModel,
NezhaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 587
| 1
|
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.bert.modeling_bert import (
BERT_INPUTS_DOCSTRING,
BERT_START_DOCSTRING,
BertEmbeddings,
BertLayer,
BertPooler,
BertPreTrainedModel,
)
def entropy( _A ):
    '''simple docstring'''
    exp_x = torch.exp(_A )
    A = torch.sum(exp_x , dim=1 )  # sum of exp(x_i)
    B = torch.sum(_A * exp_x , dim=1 )  # sum of x_i * exp(x_i)
    return torch.log(A ) - B / A
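# Derivation note: with softmax probabilities p_i = exp(x_i) / A, the entropy is
#   H = -sum_i p_i * log(p_i) = log(A) - (sum_i x_i * exp(x_i)) / A = log(A) - B / A,
# which is exactly the closed form returned above.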
class DeeBertEncoder( nn.Module ):
'''simple docstring'''
    def __init__( self : Optional[int] , config : List[Any] ):
        """simple docstring"""
        super().__init__()
        self.output_attentions = config.output_attentions
        self.output_hidden_states = config.output_hidden_states
        self.layer = nn.ModuleList([BertLayer(config ) for _ in range(config.num_hidden_layers )] )
        self.highway = nn.ModuleList([BertHighway(config ) for _ in range(config.num_hidden_layers )] )
        self.early_exit_entropy = [-1 for _ in range(config.num_hidden_layers )]
    def set_early_exit_entropy( self : Union[str, Any] , __lowercase : Optional[int] ):
        """simple docstring"""
        if (type(__lowercase ) is float) or (type(__lowercase ) is int):
            for i in range(len(self.early_exit_entropy ) ):
                self.early_exit_entropy[i] = __lowercase
        else:
            self.early_exit_entropy = __lowercase
    def init_highway_pooler( self : int , pooler : Tuple ):
        """simple docstring"""
        loaded_model = pooler.state_dict()
        for highway in self.highway:
            for name, param in highway.pooler.state_dict().items():
                param.copy_(loaded_model[name] )
    def forward( self : List[Any] , hidden_states : List[str] , attention_mask : List[Any]=None , head_mask : Optional[int]=None , encoder_hidden_states : Optional[Any]=None , encoder_attention_mask : List[str]=None , ):
        """simple docstring"""
        all_hidden_states = ()
        all_attentions = ()
        all_highway_exits = ()
        for i, layer_module in enumerate(self.layer ):
            if self.output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)
            layer_outputs = layer_module(
                hidden_states , attention_mask , head_mask[i] , encoder_hidden_states , encoder_attention_mask )
            hidden_states = layer_outputs[0]
            if self.output_attentions:
                all_attentions = all_attentions + (layer_outputs[1],)
            current_outputs = (hidden_states,)
            if self.output_hidden_states:
                current_outputs = current_outputs + (all_hidden_states,)
            if self.output_attentions:
                current_outputs = current_outputs + (all_attentions,)
            highway_exit = self.highway[i](current_outputs )
            # logits, pooled_output
            if not self.training:
                highway_logits = highway_exit[0]
                highway_entropy = entropy(highway_logits )
                highway_exit = highway_exit + (highway_entropy,)  # logits, hidden_states(?), entropy
                all_highway_exits = all_highway_exits + (highway_exit,)
                if highway_entropy < self.early_exit_entropy[i]:
                    new_output = (highway_logits,) + current_outputs[1:] + (all_highway_exits,)
                    raise HighwayException(new_output , i + 1 )
            else:
                all_highway_exits = all_highway_exits + (highway_exit,)
        # Add last layer
        if self.output_hidden_states:
            all_hidden_states = all_hidden_states + (hidden_states,)
        outputs = (hidden_states,)
        if self.output_hidden_states:
            outputs = outputs + (all_hidden_states,)
        if self.output_attentions:
            outputs = outputs + (all_attentions,)
        outputs = outputs + (all_highway_exits,)
        return outputs  # last-layer hidden state, (all hidden states), (all attentions), all highway exits
@add_start_docstrings(
    '''The Bert Model transformer with early exiting (DeeBERT). ''' , BERT_START_DOCSTRING , )
class DeeBertModel( BertPreTrainedModel ):
'''simple docstring'''
    def __init__( self : Union[str, Any] , config : List[Any] ):
        """simple docstring"""
        super().__init__(config )
        self.config = config
        self.embeddings = BertEmbeddings(config )
        self.encoder = DeeBertEncoder(config )
        self.pooler = BertPooler(config )
        self.init_weights()
    def init_highway_pooler( self : Tuple ):
        """simple docstring"""
        self.encoder.init_highway_pooler(self.pooler )
    def get_input_embeddings( self : Tuple ):
        """simple docstring"""
        return self.embeddings.word_embeddings
    def set_input_embeddings( self : Optional[int] , value : Any ):
        """simple docstring"""
        self.embeddings.word_embeddings = value
    def _prune_heads( self : List[Any] , heads_to_prune : Optional[int] ):
        """simple docstring"""
        for layer, heads in heads_to_prune.items():
            self.encoder.layer[layer].attention.prune_heads(heads )
    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING )
    def forward( self : List[str] , input_ids : List[str]=None , attention_mask : int=None , token_type_ids : Tuple=None , position_ids : Dict=None , head_mask : str=None , inputs_embeds : Tuple=None , encoder_hidden_states : List[Any]=None , encoder_attention_mask : List[str]=None , ):
        """simple docstring"""
        if input_ids is not None and inputs_embeds is not None:
            raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time" )
        elif input_ids is not None:
            input_shape = input_ids.size()
        elif inputs_embeds is not None:
            input_shape = inputs_embeds.size()[:-1]
        else:
            raise ValueError("You have to specify either input_ids or inputs_embeds" )
        device = input_ids.device if input_ids is not None else inputs_embeds.device
        if attention_mask is None:
            attention_mask = torch.ones(input_shape , device=device )
        if encoder_attention_mask is None:
            encoder_attention_mask = torch.ones(input_shape , device=device )
        if token_type_ids is None:
            token_type_ids = torch.zeros(input_shape , dtype=torch.long , device=device )
        # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
        # ourselves in which case we just need to make it broadcastable to all heads.
        extended_attention_mask = self.get_extended_attention_mask(attention_mask , input_shape , device )
        # If a 2D ou 3D attention mask is provided for the cross-attention
        # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
        if encoder_attention_mask.dim() == 3:
            encoder_extended_attention_mask = encoder_attention_mask[:, None, :, :]
        if encoder_attention_mask.dim() == 2:
            encoder_extended_attention_mask = encoder_attention_mask[:, None, None, :]
        encoder_extended_attention_mask = encoder_extended_attention_mask.to(
            dtype=next(self.parameters() ).dtype )  # fp16 compatibility
        encoder_extended_attention_mask = (1.0 - encoder_extended_attention_mask) * -1_0000.0
        # Prepare head mask if needed
        # 1.0 in head_mask indicate we keep the head
        # attention_probs has shape bsz x n_heads x N x N
        # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
        # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
        head_mask = self.get_head_mask(head_mask , self.config.num_hidden_layers )
        embedding_output = self.embeddings(
            input_ids=input_ids , position_ids=position_ids , token_type_ids=token_type_ids , inputs_embeds=inputs_embeds )
        encoder_outputs = self.encoder(
            embedding_output , attention_mask=extended_attention_mask , head_mask=head_mask , encoder_hidden_states=encoder_hidden_states , encoder_attention_mask=encoder_extended_attention_mask , )
        sequence_output = encoder_outputs[0]
        pooled_output = self.pooler(sequence_output )
        outputs = (
            sequence_output,
            pooled_output,
        ) + encoder_outputs[
            1:
        ]  # add hidden_states and attentions if they are here
        return outputs  # sequence_output, pooled_output, (hidden_states), (attentions), highway exits
class HighwayException( Exception ):
    '''simple docstring'''
    def __init__( self : Dict , message : Tuple , exit_layer : List[Any] ):
        """simple docstring"""
        self.message = message
        self.exit_layer = exit_layer  # start from 1!
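# Control-flow note: HighwayException is raised inside DeeBertEncoder.forward as soon
# as a highway classifier's entropy falls below its threshold, so inference can skip
# the remaining transformer layers; the caller catches it and reads the early-exit logits.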
class BertHighway( nn.Module ):
    '''simple docstring'''
    def __init__( self : int , config : Optional[int] ):
        """simple docstring"""
        super().__init__()
        self.pooler = BertPooler(config )
        self.dropout = nn.Dropout(config.hidden_dropout_prob )
        self.classifier = nn.Linear(config.hidden_size , config.num_labels )
    def forward( self : List[str] , encoder_outputs : str ):
        """simple docstring"""
        # Pooler
        pooler_input = encoder_outputs[0]
        pooler_output = self.pooler(pooler_input )
        # "return" pooler_output
        # BertModel
        bmodel_output = (pooler_input, pooler_output) + encoder_outputs[1:]
        # "return" bmodel_output
        # Dropout and classification
        pooled_output = bmodel_output[1]
        pooled_output = self.dropout(pooled_output )
        logits = self.classifier(pooled_output )
        return logits, pooled_output
@add_start_docstrings(
    '''Bert Model (with early exiting - DeeBERT) with a classifier on top,
    also takes care of multi-layer training. ''' , BERT_START_DOCSTRING , )
class DeeBertForSequenceClassification( BertPreTrainedModel ):
'''simple docstring'''
    def __init__( self : Union[str, Any] , config : Union[str, Any] ):
        """simple docstring"""
        super().__init__(config )
        self.num_labels = config.num_labels
        self.num_layers = config.num_hidden_layers
        self.bert = DeeBertModel(config )
        self.dropout = nn.Dropout(config.hidden_dropout_prob )
        self.classifier = nn.Linear(config.hidden_size , self.config.num_labels )
        self.init_weights()
    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING )
    def forward( self : Optional[int] , input_ids : List[Any]=None , attention_mask : Dict=None , token_type_ids : Optional[Any]=None , position_ids : Optional[int]=None , head_mask : List[Any]=None , inputs_embeds : Tuple=None , labels : List[str]=None , output_layer : int=-1 , train_highway : Tuple=False , ):
        """simple docstring"""
        exit_layer = self.num_layers
        try:
            outputs = self.bert(
                input_ids , attention_mask=attention_mask , token_type_ids=token_type_ids , position_ids=position_ids , head_mask=head_mask , inputs_embeds=inputs_embeds , )
            # sequence_output, pooled_output, (hidden_states), (attentions), highway exits
            pooled_output = outputs[1]
            pooled_output = self.dropout(pooled_output )
            logits = self.classifier(pooled_output )
            outputs = (logits,) + outputs[2:]  # add hidden states and attention if they are here
        except HighwayException as e:
            outputs = e.message
            exit_layer = e.exit_layer
            logits = outputs[0]
        if not self.training:
            original_entropy = entropy(logits )
            highway_entropy = []
            highway_logits_all = []
        if labels is not None:
            if self.num_labels == 1:
                # We are doing regression
                loss_fct = MSELoss()
                loss = loss_fct(logits.view(-1 ) , labels.view(-1 ) )
            else:
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
            # work with highway exits
            highway_losses = []
            for highway_exit in outputs[-1]:
                highway_logits = highway_exit[0]
                if not self.training:
                    highway_logits_all.append(highway_logits )
                    highway_entropy.append(highway_exit[2] )
                if self.num_labels == 1:
                    # We are doing regression
                    loss_fct = MSELoss()
                    highway_loss = loss_fct(highway_logits.view(-1 ) , labels.view(-1 ) )
                else:
                    loss_fct = CrossEntropyLoss()
                    highway_loss = loss_fct(highway_logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
                highway_losses.append(highway_loss )
            if train_highway:
                outputs = (sum(highway_losses[:-1] ),) + outputs
                # exclude the final highway, of course
            else:
                outputs = (loss,) + outputs
        if not self.training:
            outputs = outputs + ((original_entropy, highway_entropy), exit_layer)
            if output_layer >= 0:
                outputs = (
                    (outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:]
                )  # use the highway of the last layer
        return outputs  # (loss), logits, (hidden_states), (attentions), (highway_exits)
| 376
|
def triangle_number_generator( ):
'''simple docstring'''
for n in range(1 , 1000000 ):
yield n * (n + 1) // 2
def count_divisors( n ):
    '''simple docstring'''
    divisors_count = 1
    i = 2
while i * i <= n:
        multiplicity = 0
while n % i == 0:
n //= i
multiplicity += 1
divisors_count *= multiplicity + 1
i += 1
if n > 1:
divisors_count *= 2
return divisors_count
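# The loop above counts divisors via the prime factorisation: if n = p1^a1 * ... * pk^ak
# then d(n) = (a1 + 1) * ... * (ak + 1); e.g. 28 = 2^2 * 7 gives (2+1)*(1+1) = 6
# divisors: 1, 2, 4, 7, 14, 28.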
def solution():
    '''simple docstring'''
    return next(i for i in triangle_number_generator() if count_divisors(i ) > 500 )
if __name__ == "__main__":
print(solution())
| 376
| 1
|
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
from transformers import (
BitConfig,
ViTHybridConfig,
ViTHybridForImageClassification,
ViTHybridImageProcessor,
ViTHybridModel,
)
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
__lowercase : Any = logging.get_logger(__name__)
def create_rename_keys( config : Optional[Any] , base_model : int=False ):
    rename_keys = []
# fmt: off
# stem:
rename_keys.append(('''cls_token''', '''vit.embeddings.cls_token''') )
rename_keys.append(('''pos_embed''', '''vit.embeddings.position_embeddings''') )
rename_keys.append(('''patch_embed.proj.weight''', '''vit.embeddings.patch_embeddings.projection.weight''') )
rename_keys.append(('''patch_embed.proj.bias''', '''vit.embeddings.patch_embeddings.projection.bias''') )
# backbone
rename_keys.append(('''patch_embed.backbone.stem.conv.weight''', '''vit.embeddings.patch_embeddings.backbone.bit.embedder.convolution.weight''') )
rename_keys.append(('''patch_embed.backbone.stem.norm.weight''', '''vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.weight''') )
rename_keys.append(('''patch_embed.backbone.stem.norm.bias''', '''vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.bias''') )
for stage_idx in range(len(config.backbone_config.depths ) ):
for layer_idx in range(config.backbone_config.depths[stage_idx] ):
rename_keys.append((F"""patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv1.weight""", F"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv1.weight""") )
rename_keys.append((F"""patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.weight""", F"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.weight""") )
rename_keys.append((F"""patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.bias""", F"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.bias""") )
rename_keys.append((F"""patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv2.weight""", F"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv2.weight""") )
rename_keys.append((F"""patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.weight""", F"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.weight""") )
rename_keys.append((F"""patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.bias""", F"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.bias""") )
rename_keys.append((F"""patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv3.weight""", F"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv3.weight""") )
rename_keys.append((F"""patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.weight""", F"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.weight""") )
rename_keys.append((F"""patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.bias""", F"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.bias""") )
rename_keys.append((F"""patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.conv.weight""", F"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.conv.weight""") )
rename_keys.append((F"""patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.weight""", F"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.weight""") )
rename_keys.append((F"""patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.bias""", F"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.bias""") )
# transformer encoder
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F"""blocks.{i}.norm1.weight""", F"""vit.encoder.layer.{i}.layernorm_before.weight""") )
rename_keys.append((F"""blocks.{i}.norm1.bias""", F"""vit.encoder.layer.{i}.layernorm_before.bias""") )
rename_keys.append((F"""blocks.{i}.attn.proj.weight""", F"""vit.encoder.layer.{i}.attention.output.dense.weight""") )
rename_keys.append((F"""blocks.{i}.attn.proj.bias""", F"""vit.encoder.layer.{i}.attention.output.dense.bias""") )
rename_keys.append((F"""blocks.{i}.norm2.weight""", F"""vit.encoder.layer.{i}.layernorm_after.weight""") )
rename_keys.append((F"""blocks.{i}.norm2.bias""", F"""vit.encoder.layer.{i}.layernorm_after.bias""") )
rename_keys.append((F"""blocks.{i}.mlp.fc1.weight""", F"""vit.encoder.layer.{i}.intermediate.dense.weight""") )
rename_keys.append((F"""blocks.{i}.mlp.fc1.bias""", F"""vit.encoder.layer.{i}.intermediate.dense.bias""") )
rename_keys.append((F"""blocks.{i}.mlp.fc2.weight""", F"""vit.encoder.layer.{i}.output.dense.weight""") )
rename_keys.append((F"""blocks.{i}.mlp.fc2.bias""", F"""vit.encoder.layer.{i}.output.dense.bias""") )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
('''norm.weight''', '''layernorm.weight'''),
('''norm.bias''', '''layernorm.bias'''),
('''pre_logits.fc.weight''', '''pooler.dense.weight'''),
('''pre_logits.fc.bias''', '''pooler.dense.bias'''),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith('''vit''' ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
('''norm.weight''', '''vit.layernorm.weight'''),
('''norm.bias''', '''vit.layernorm.bias'''),
('''head.weight''', '''classifier.weight'''),
('''head.bias''', '''classifier.bias'''),
] )
# fmt: on
return rename_keys
def read_in_q_k_v( state_dict : Any , config : Tuple , base_model : int=False ):
    for i in range(config.num_hidden_layers ):
        if base_model:
            prefix = ""
        else:
            prefix = "vit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(F"""blocks.{i}.attn.qkv.weight""" )
        in_proj_bias = state_dict.pop(F"""blocks.{i}.attn.qkv.bias""" )
        # next, add query, keys and values (in that order) to the state dict
        state_dict[F"""{prefix}encoder.layer.{i}.attention.attention.query.weight"""] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[F"""{prefix}encoder.layer.{i}.attention.attention.query.bias"""] = in_proj_bias[: config.hidden_size]
        state_dict[F"""{prefix}encoder.layer.{i}.attention.attention.key.weight"""] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[F"""{prefix}encoder.layer.{i}.attention.attention.key.bias"""] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[F"""{prefix}encoder.layer.{i}.attention.attention.value.weight"""] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[F"""{prefix}encoder.layer.{i}.attention.attention.value.bias"""] = in_proj_bias[-config.hidden_size :]
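# Note: timm fuses query/key/value into one (3 * hidden_size, hidden_size) projection;
# the three slices above split it back into separate Q, K and V matrices (and the
# matching thirds of the bias) in the layout the HuggingFace model expects.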
def remove_classification_head_( state_dict : List[Any] ):
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k , None )
def rename_key( dct : List[Any] , old : Tuple , new : int ):
    val = dct.pop(old )
    dct[new] = val
def prepare_img( ):
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url , stream=True ).raw )
    return im
@torch.no_grad()
def convert_vit_checkpoint( vit_name : Any , pytorch_dump_folder_path : Any , push_to_hub : Optional[int]=False ):
    backbone_config = BitConfig(
        global_padding='''same''' , layer_type='''bottleneck''' , depths=(3, 4, 9) , out_features=['''stage3'''] , embedding_dynamic_padding=True , )
    config = ViTHybridConfig(backbone_config=backbone_config , image_size=3_8_4 , num_labels=1_0_0_0 )
    base_model = False
    # load original model from timm
    timm_model = timm.create_model(vit_name , pretrained=True )
    timm_model.eval()
    # load state_dict of original model, remove and rename some keys
    state_dict = timm_model.state_dict()
    if base_model:
        remove_classification_head_(state_dict )
    rename_keys = create_rename_keys(config , base_model )
    for src, dest in rename_keys:
        rename_key(state_dict , src , dest )
    read_in_q_k_v(state_dict , config , base_model )
lowerCamelCase_ = "huggingface/label-files"
lowerCamelCase_ = "imagenet-1k-id2label.json"
lowerCamelCase_ = json.load(open(hf_hub_download(_lowerCamelCase , _lowerCamelCase , repo_type='''dataset''' ) , '''r''' ) )
lowerCamelCase_ = {int(_lowerCamelCase ): v for k, v in idalabel.items()}
lowerCamelCase_ = idalabel
lowerCamelCase_ = {v: k for k, v in idalabel.items()}
# load HuggingFace model
if vit_name[-5:] == "in21k":
lowerCamelCase_ = ViTHybridModel(_lowerCamelCase ).eval()
else:
lowerCamelCase_ = ViTHybridForImageClassification(_lowerCamelCase ).eval()
model.load_state_dict(_lowerCamelCase )
# create image processor
    transform = create_transform(**resolve_data_config({} , model=timm_model ) )
    timm_transforms = transform.transforms
    pillow_resamplings = {
        "bilinear": PILImageResampling.BILINEAR,
        "bicubic": PILImageResampling.BICUBIC,
        "nearest": PILImageResampling.NEAREST,
    }
    processor = ViTHybridImageProcessor(
        do_resize=True , size={'''shortest_edge''': timm_transforms[0].size} , resample=pillow_resamplings[timm_transforms[0].interpolation.value] , do_center_crop=True , crop_size={'''height''': timm_transforms[1].size[0], '''width''': timm_transforms[1].size[1]} , do_normalize=True , image_mean=timm_transforms[-1].mean.tolist() , image_std=timm_transforms[-1].std.tolist() , )
    image = prepare_img()
    timm_pixel_values = transform(image ).unsqueeze(0 )
    pixel_values = processor(image , return_tensors='''pt''' ).pixel_values
    # verify pixel values
    assert torch.allclose(timm_pixel_values , pixel_values )
# verify logits
    with torch.no_grad():
        outputs = model(pixel_values )
    logits = outputs.logits
    print('''Predicted class:''' , logits.argmax(-1 ).item() )
    if base_model:
        timm_pooled_output = timm_model.forward_features(pixel_values )
        assert timm_pooled_output.shape == outputs.pooler_output.shape
        assert torch.allclose(timm_pooled_output , outputs.pooler_output , atol=1E-3 )
    else:
        timm_logits = timm_model(pixel_values )
        assert timm_logits.shape == outputs.logits.shape
        assert torch.allclose(timm_logits , outputs.logits , atol=1E-3 )
print('''Looks ok!''' )
if pytorch_dump_folder_path is not None:
        Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
print(F"""Saving model {vit_name} to {pytorch_dump_folder_path}""" )
        model.save_pretrained(pytorch_dump_folder_path )
print(F"""Saving processor to {pytorch_dump_folder_path}""" )
        processor.save_pretrained(pytorch_dump_folder_path )
if push_to_hub:
print(F"""Pushing model and processor to the hub {vit_name}""" )
model.push_to_hub(F"""ybelkada/{vit_name}""" )
processor.push_to_hub(F"""ybelkada/{vit_name}""" )
if __name__ == "__main__":
__lowercase : str = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--vit_name""",
default="""vit_base_r50_s16_384""",
type=str,
help="""Name of the hybrid ViT timm model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether to upload the model to the HuggingFace hub."""
)
__lowercase : Tuple = parser.parse_args()
convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 720
|
"""simple docstring"""
import heapq as hq
import math
from collections.abc import Iterator
class Vertex :
"""simple docstring"""
    def __init__( self , UpperCamelCase__ ) -> str:
        '''simple docstring'''
        self.id = str(UpperCamelCase__ )
        self.key = None
        self.pi = None
        self.neighbors = []
        self.edges = {}  # {vertex:distance}
def __lt__( self , UpperCamelCase__ ) -> Optional[Any]:
'''simple docstring'''
return self.key < other.key
def __repr__( self ) -> Union[str, Any]:
'''simple docstring'''
return self.id
    def add_neighbor ( self , UpperCamelCase__ ) -> Tuple:
        '''simple docstring'''
        self.neighbors.append(UpperCamelCase__ )
    def add_edge ( self , vertex , weight ) -> Dict:
        '''simple docstring'''
        self.edges[vertex.id] = weight
def connect ( graph : Union[str, Any] , a : Optional[int] , b : str , edge : Dict ):
    # add the neighbors:
    graph[a - 1].add_neighbor(graph[b - 1] )
    graph[b - 1].add_neighbor(graph[a - 1] )
    # add the edges:
    graph[a - 1].add_edge(graph[b - 1] , edge )
    graph[b - 1].add_edge(graph[a - 1] , edge )
def prim ( graph : list , root : Vertex ):
    a = []
    for u in graph:
        u.key = math.inf
        u.pi = None
    root.key = 0
    q = graph[:]
    while q:
        u = min(q )
        q.remove(u )
        for v in u.neighbors:
            if (v in q) and (u.edges[v.id] < v.key):
                v.pi = u
                v.key = u.edges[v.id]
    for i in range(1 , len(graph ) ):
        a.append((int(graph[i].id ) + 1, int(graph[i].pi.id ) + 1) )
    return a
def prim_heap ( graph : list , root : Vertex ):
    for u in graph:
        u.key = math.inf
        u.pi = None
    root.key = 0
    h = list(graph )
    hq.heapify(h )
    while h:
        u = hq.heappop(h )
        for v in u.neighbors:
            if (v in h) and (u.edges[v.id] < v.key):
                v.pi = u
                v.key = u.edges[v.id]
                hq.heapify(h )
    for i in range(1 , len(graph ) ):
        yield (int(graph[i].id ) + 1, int(graph[i].pi.id ) + 1)
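# Complexity note: the list-based prim() rescans for the minimum key each round,
# giving O(V^2) overall; prim_heap() pops from a binary heap instead, but re-heapifying
# after every key update (as done here) costs O(V) per update, so this variant is a
# readable sketch rather than the O(E log V) decrease-key formulation.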
def test_vector ( ):
    pass
if __name__ == "__main__":
import doctest
doctest.testmod()
| 66
| 0
|
'''simple docstring'''
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxSeqaSeqConfigWithPast
from ...utils import logging
logger = logging.get_logger(__name__)
UMT5_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''google/umt5-small''': '''https://huggingface.co/google/umt5-small/resolve/main/config.json''',
# See all umt5 models at https://huggingface.co/models?filter=umt5
}
class UMT5Config( PretrainedConfig):
    model_type = '''umt5'''
    keys_to_ignore_at_inference = ['''past_key_values''']
    def __init__( self , vocab_size=25_0112 , d_model=512 , d_kv=64 , d_ff=1024 , num_layers=8 , num_decoder_layers=None , num_heads=6 , relative_attention_num_buckets=32 , relative_attention_max_distance=128 , dropout_rate=0.1 , layer_norm_epsilon=1E-6 , initializer_factor=1.0 , feed_forward_proj="gated-gelu" , is_encoder_decoder=True , use_cache=True , tokenizer_class="T5Tokenizer" , tie_word_embeddings=True , pad_token_id=0 , eos_token_id=1 , decoder_start_token_id=0 , **kwargs , ) -> Union[str, Any]:
        '''simple docstring'''
        super().__init__(
            is_encoder_decoder=is_encoder_decoder , tokenizer_class=tokenizer_class , tie_word_embeddings=tie_word_embeddings , pad_token_id=pad_token_id , eos_token_id=eos_token_id , decoder_start_token_id=decoder_start_token_id , **kwargs , )
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.d_kv = d_kv
        self.d_ff = d_ff
        self.num_layers = num_layers
        self.num_decoder_layers = (
            num_decoder_layers if num_decoder_layers is not None else self.num_layers
        )  # default = symmetry
        self.num_heads = num_heads
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.feed_forward_proj = feed_forward_proj
        self.use_cache = use_cache
        act_info = self.feed_forward_proj.split('''-''' )
        self.dense_act_fn = act_info[-1]
        self.is_gated_act = act_info[0] == '''gated'''
        if len(act_info ) > 1 and act_info[0] != "gated" or len(act_info ) > 2:
            raise ValueError(
                F'''`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer.'''
                '''Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. '''
                '''\'gated-gelu\' or \'relu\'''' )
        if feed_forward_proj == "gated-gelu":
            self.dense_act_fn = '''gelu_new'''
@property
    def hidden_size( self ) -> int:
'''simple docstring'''
return self.d_model
@property
    def num_attention_heads( self ) -> Any:
'''simple docstring'''
return self.num_heads
@property
    def num_hidden_layers( self ) -> List[Any]:
'''simple docstring'''
return self.num_layers
class UMT5OnnxConfig( OnnxSeqaSeqConfigWithPast):
@property
# Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.inputs
    def inputs( self ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
        common_inputs = {
            '''input_ids''': {0: '''batch''', 1: '''encoder_sequence'''},
            '''attention_mask''': {0: '''batch''', 1: '''encoder_sequence'''},
        }
        if self.use_past:
            common_inputs['''attention_mask'''][1] = '''past_encoder_sequence + sequence'''
            common_inputs['''decoder_input_ids'''] = {0: '''batch'''}
            common_inputs['''decoder_attention_mask'''] = {0: '''batch''', 1: '''past_decoder_sequence + sequence'''}
        else:
            common_inputs['''decoder_input_ids'''] = {0: '''batch''', 1: '''decoder_sequence'''}
            common_inputs['''decoder_attention_mask'''] = {0: '''batch''', 1: '''decoder_sequence'''}
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs , direction='''inputs''' )
        return common_inputs
@property
# Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.default_onnx_opset
    def default_onnx_opset( self ) -> int:
'''simple docstring'''
return 13
@property
    def atol_for_validation( self ) -> float:
'''simple docstring'''
return 5E-4
| 24
|
'''simple docstring'''
import argparse
from transformers import (
TapasConfig,
TapasForMaskedLM,
TapasForQuestionAnswering,
TapasForSequenceClassification,
TapasModel,
TapasTokenizer,
load_tf_weights_in_tapas,
)
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch( task : Optional[Any] , reset_position_index_per_cell : Any , tf_checkpoint_path : Union[str, Any] , tapas_config_file : Tuple , pytorch_dump_path : str):
    # Initialise PyTorch model.
    # If you want to convert a checkpoint that uses absolute position embeddings, make sure to set reset_position_index_per_cell of
    # TapasConfig to False.
    # initialize configuration from json file
    config = TapasConfig.from_json_file(tapas_config_file)
    # set absolute/relative position embeddings parameter
    config.reset_position_index_per_cell = reset_position_index_per_cell
    # set remaining parameters of TapasConfig as well as the model based on the task
    if task == "SQA":
        model = TapasForQuestionAnswering(config=config)
    elif task == "WTQ":
        # run_task_main.py hparams
        config.num_aggregation_labels = 4
        config.use_answer_as_supervision = True
        # hparam_utils.py hparams
        config.answer_loss_cutoff = 0.66_4694
        config.cell_selection_preference = 0.20_7951
        config.huber_loss_delta = 0.12_1194
        config.init_cell_selection_weights_to_zero = True
        config.select_one_column = True
        config.allow_empty_column_selection = False
        config.temperature = 0.035_2513
        model = TapasForQuestionAnswering(config=config)
    elif task == "WIKISQL_SUPERVISED":
        # run_task_main.py hparams
        config.num_aggregation_labels = 4
        config.use_answer_as_supervision = False
        # hparam_utils.py hparams
        config.answer_loss_cutoff = 36.4519
        config.cell_selection_preference = 0.90_3421
        config.huber_loss_delta = 222.088
        config.init_cell_selection_weights_to_zero = True
        config.select_one_column = True
        config.allow_empty_column_selection = True
        config.temperature = 0.76_3141
        model = TapasForQuestionAnswering(config=config)
    elif task == "TABFACT":
        model = TapasForSequenceClassification(config=config)
    elif task == "MLM":
        model = TapasForMaskedLM(config=config)
    elif task == "INTERMEDIATE_PRETRAINING":
        model = TapasModel(config=config)
    else:
        raise ValueError(F'Task {task} not supported.')
    print(F'Building PyTorch model from configuration: {config}')
    # Load weights from tf checkpoint
    load_tf_weights_in_tapas(model , config , tf_checkpoint_path)
    # Save pytorch-model (weights and configuration)
    print(F'Save PyTorch model to {pytorch_dump_path}')
    model.save_pretrained(pytorch_dump_path)
    # Save tokenizer files
    print(F'Save tokenizer files to {pytorch_dump_path}')
    tokenizer = TapasTokenizer(vocab_file=tf_checkpoint_path[:-10] + """vocab.txt""" , model_max_length=512)
    tokenizer.save_pretrained(pytorch_dump_path)
    print("""Used relative position embeddings:""" , model.config.reset_position_index_per_cell)
if __name__ == "__main__":
__magic_name__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--task', default='SQA', type=str, help='Model task for which to convert a checkpoint. Defaults to SQA.'
)
parser.add_argument(
'--reset_position_index_per_cell',
default=False,
action='store_true',
help='Whether to use relative position embeddings or not. Defaults to True.',
)
parser.add_argument(
'--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--tapas_config_file',
default=None,
type=str,
required=True,
help=(
'The config json file corresponding to the pre-trained TAPAS model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
__magic_name__ = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.task,
args.reset_position_index_per_cell,
args.tf_checkpoint_path,
args.tapas_config_file,
args.pytorch_dump_path,
)
| 665
| 0
|
"""simple docstring"""
import doctest
from collections import deque
import numpy as np
class lowerCamelCase :
'''simple docstring'''
def __init__( self: List[Any] ) -> None:
        self.first_signal = [2, 1, 2, -1]
        self.second_signal = [1, 2, 3, 4]
def lowerCAmelCase_ ( self: List[str] ) -> list[float]:
        length_first_signal = len(self.first_signal )
        length_second_signal = len(self.second_signal )
        max_length = max(length_first_signal , length_second_signal )
        # create a zero matrix of max_length x max_length
        matrix = [[0] * max_length for i in range(max_length )]
# fills the smaller signal with zeros to make both signals of same length
if length_first_signal < length_second_signal:
self.first_signal += [0] * (max_length - length_first_signal)
elif length_first_signal > length_second_signal:
self.second_signal += [0] * (max_length - length_second_signal)
        for i in range(max_length ):
            rotated_signal = deque(self.second_signal )
            rotated_signal.rotate(i )
            for j, item in enumerate(rotated_signal ):
                matrix[i][j] += item
        # multiply the matrix with the first signal
        final_signal = np.matmul(np.transpose(matrix ) , np.transpose(self.first_signal ) )
        # rounding-off to two decimal places
        return [round(i , 2 ) for i in final_signal]
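# Worked example: for the default signals [2, 1, 2, -1] and [1, 2, 3, 4], the circular
# convolution computed above is [10.0, 10.0, 6.0, 14.0].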
if __name__ == "__main__":
doctest.testmod()
| 715
|
"""simple docstring"""
import glob
import os
import random
from string import ascii_lowercase, digits
import cva
__a = ""
__a = ""
__a = ""
__a = 1 # (0 is vertical, 1 is horizontal)
def main( ):
    '''simple docstring'''
    img_paths, annos = get_dataset(LABEL_DIR, IMAGE_DIR )
    print("""Processing...""" )
    new_images, new_annos, paths = update_image_and_anno(img_paths, annos, FLIP_TYPE )
    for index, image in enumerate(new_images ):
        # Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
        letter_code = random_chars(32 )
        file_name = paths[index].split(os.sep )[-1].rsplit(""".""", 1 )[0]
        file_root = f"""{OUTPUT_DIR}/{file_name}_FLIP_{letter_code}"""
        cva.imwrite(f"""/{file_root}.jpg""", image, [cva.IMWRITE_JPEG_QUALITY, 85] )
        print(f"""Success {index+1}/{len(new_images )} with {file_name}""" )
        annos_list = []
        for anno in new_annos[index]:
            obj = f"""{anno[0]} {anno[1]} {anno[2]} {anno[3]} {anno[4]}"""
            annos_list.append(obj )
        with open(f"""/{file_root}.txt""", """w""" ) as outfile:
            outfile.write("""\n""".join(line for line in annos_list ) )
def get_dataset( label_dir, img_dir ):
    '''simple docstring'''
    img_paths = []
    labels = []
    for label_file in glob.glob(os.path.join(label_dir, """*.txt""" ) ):
        label_name = label_file.split(os.sep )[-1].rsplit(""".""", 1 )[0]
        with open(label_file ) as in_file:
            obj_lists = in_file.readlines()
        img_path = os.path.join(img_dir, f"""{label_name}.jpg""" )
        boxes = []
        for obj_list in obj_lists:
            obj = obj_list.rstrip("""\n""" ).split(""" """ )
            boxes.append(
                [
                    int(obj[0] ),
                    float(obj[1] ),
                    float(obj[2] ),
                    float(obj[3] ),
                    float(obj[4] ),
                ] )
        if not boxes:
            continue
        img_paths.append(img_path )
        labels.append(boxes )
    return img_paths, labels
def update_image_and_anno( img_list, anno_list, flip_type = 1 ):
    '''simple docstring'''
    new_annos_lists = []
    path_list = []
    new_imgs_list = []
    for idx in range(len(img_list ) ):
        new_annos = []
        path = img_list[idx]
        path_list.append(path )
        img_annos = anno_list[idx]
        img = cva.imread(path )
        if flip_type == 1:
            new_img = cva.flip(img, flip_type )
            for bbox in img_annos:
                x_center_new = 1 - bbox[1]
                new_annos.append([bbox[0], x_center_new, bbox[2], bbox[3], bbox[4]] )
        elif flip_type == 0:
            new_img = cva.flip(img, flip_type )
            for bbox in img_annos:
                y_center_new = 1 - bbox[2]
                new_annos.append([bbox[0], bbox[1], y_center_new, bbox[3], bbox[4]] )
        new_annos_lists.append(new_annos )
        new_imgs_list.append(new_img )
    return new_imgs_list, new_annos_lists, path_list
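# The annotations use the normalized YOLO layout (class, x_center, y_center, width,
# height), so a horizontal flip only needs x_center -> 1 - x_center and a vertical
# flip only y_center -> 1 - y_center; widths and heights are unchanged.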
def random_chars( number_char = 32 ):
    '''simple docstring'''
    assert number_char > 1, "The number of character should greater than 1"
    letter_code = ascii_lowercase + digits
    return "".join(random.choice(letter_code ) for _ in range(number_char ) )
if __name__ == "__main__":
main()
print("DONE ✅")
| 310
| 0
|
import os
import time
import pytest
from datasets.utils.filelock import FileLock, Timeout
def test_filelock( tmpdir ) -> Tuple:
    locka = FileLock(str(tmpdir / 'foo.lock' ) )
    lockb = FileLock(str(tmpdir / 'foo.lock' ) )
    timeout = 0.01
    with locka.acquire():
        with pytest.raises(Timeout ):
            _start = time.time()
            lockb.acquire(timeout )
        assert time.time() - _start > timeout
def test_long_path( tmpdir ) -> Tuple:
    filename = 'a' * 1_000 + '.lock'
    locka = FileLock(str(tmpdir / filename ) )
    assert locka._lock_file.endswith('.lock' )
    assert not locka._lock_file.endswith(filename )
    assert len(os.path.basename(locka._lock_file ) ) <= 255
    lockb = FileLock(tmpdir / filename )
    with locka.acquire():
        with pytest.raises(Timeout ):
            lockb.acquire(0 )
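# The second test documents that this FileLock wrapper shortens over-long lock-file
# names so the on-disk name stays within the usual 255-character filesystem limit
# (a behavioral assumption that the asserts above make explicit).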
| 9
|
import json
import os
import torch
from diffusers import UNetaDModel
os.makedirs('''hub/hopper-medium-v2/unet/hor32''', exist_ok=True)
os.makedirs('''hub/hopper-medium-v2/unet/hor128''', exist_ok=True)
os.makedirs('''hub/hopper-medium-v2/value_function''', exist_ok=True)
def unet( hor ) -> Union[str, Any]:
    if hor == 128:
        down_block_types = ('DownResnetBlock1D', 'DownResnetBlock1D', 'DownResnetBlock1D')
        block_out_channels = (32, 128, 256)
        up_block_types = ('UpResnetBlock1D', 'UpResnetBlock1D')
    elif hor == 32:
        down_block_types = ('DownResnetBlock1D', 'DownResnetBlock1D', 'DownResnetBlock1D', 'DownResnetBlock1D')
        block_out_channels = (32, 64, 128, 256)
        up_block_types = ('UpResnetBlock1D', 'UpResnetBlock1D', 'UpResnetBlock1D')
    model = torch.load(f'''/Users/bglickenhaus/Documents/diffuser/temporal_unet-hopper-mediumv2-hor{hor}.torch''' )
    state_dict = model.state_dict()
    config = {
'down_block_types': down_block_types,
'block_out_channels': block_out_channels,
'up_block_types': up_block_types,
'layers_per_block': 1,
'use_timestep_embedding': True,
'out_block_type': 'OutConv1DBlock',
'norm_num_groups': 8,
'downsample_each_block': False,
'in_channels': 14,
'out_channels': 14,
'extra_in_channels': 0,
'time_embedding_type': 'positional',
'flip_sin_to_cos': False,
'freq_shift': 1,
'sample_size': 65_536,
'mid_block_type': 'MidResTemporalBlock1D',
'act_fn': 'mish',
}
    hf_value_function = UNetaDModel(**config )
    print(f'''length of state dict: {len(state_dict.keys() )}''' )
    print(f'''length of value function dict: {len(hf_value_function.state_dict().keys() )}''' )
    mapping = dict(zip(model.state_dict().keys() , hf_value_function.state_dict().keys() ) )
    for k, v in mapping.items():
        state_dict[v] = state_dict.pop(k )
    hf_value_function.load_state_dict(state_dict )
    torch.save(hf_value_function.state_dict() , f'''hub/hopper-medium-v2/unet/hor{hor}/diffusion_pytorch_model.bin''' )
    with open(f'''hub/hopper-medium-v2/unet/hor{hor}/config.json''' , 'w' ) as f:
        json.dump(config , f )
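# Caveat: the key mapping built with zip() assumes both state dicts enumerate their
# parameters in the same order, which appears to hold here because the HF UNet blocks
# mirror the original diffuser layout one-to-one.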
def value_function( ) -> List[str]:
    config = {
'in_channels': 14,
'down_block_types': ('DownResnetBlock1D', 'DownResnetBlock1D', 'DownResnetBlock1D', 'DownResnetBlock1D'),
'up_block_types': (),
'out_block_type': 'ValueFunction',
'mid_block_type': 'ValueFunctionMidBlock1D',
'block_out_channels': (32, 64, 128, 256),
'layers_per_block': 1,
'downsample_each_block': True,
'sample_size': 65_536,
'out_channels': 14,
'extra_in_channels': 0,
'time_embedding_type': 'positional',
'use_timestep_embedding': True,
'flip_sin_to_cos': False,
'freq_shift': 1,
'norm_num_groups': 8,
'act_fn': 'mish',
}
    model = torch.load('/Users/bglickenhaus/Documents/diffuser/value_function-hopper-mediumv2-hor32.torch' )
    state_dict = model
    hf_value_function = UNetaDModel(**config )
    print(f'''length of state dict: {len(state_dict.keys() )}''' )
    print(f'''length of value function dict: {len(hf_value_function.state_dict().keys() )}''' )
    mapping = dict(zip(state_dict.keys() , hf_value_function.state_dict().keys() ) )
    for k, v in mapping.items():
        state_dict[v] = state_dict.pop(k )
    hf_value_function.load_state_dict(state_dict )
    torch.save(hf_value_function.state_dict() , 'hub/hopper-medium-v2/value_function/diffusion_pytorch_model.bin' )
    with open('hub/hopper-medium-v2/value_function/config.json' , 'w' ) as f:
        json.dump(config , f )
if __name__ == "__main__":
unet(3_2)
# unet(128)
value_function()
| 9
| 1
|
"""simple docstring"""
import functools
def __lowerCAmelCase( days ,costs ):
    """simple docstring"""
    if not isinstance(days ,list ) or not all(isinstance(day ,int ) for day in days ):
        raise ValueError('The parameter days should be a list of integers' )
    if len(costs ) != 3 or not all(isinstance(cost ,int ) for cost in costs ):
        raise ValueError('The parameter costs should be a list of three integers' )
    if len(days ) == 0:
        return 0
    if min(days ) <= 0:
        raise ValueError('All days elements should be greater than 0' )
    if max(days ) >= 366:
        raise ValueError('All days elements should be less than 366' )
    days_set = set(days )
@functools.cache
def dynamic_programming(__UpperCAmelCase ) -> int:
if index > 365:
return 0
if index not in days_set:
return dynamic_programming(index + 1 )
return min(
costs[0] + dynamic_programming(index + 1 ) ,costs[1] + dynamic_programming(index + 7 ) ,costs[2] + dynamic_programming(index + 30 ) ,)
return dynamic_programming(1 )
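# Example (minimum cost for travel days, LeetCode-983 style): days=[1, 4, 6, 7, 8, 20]
# with costs=[2, 7, 15] yields 11: a 7-day pass covering days 4..8 plus 1-day passes
# for days 1 and 20.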
if __name__ == "__main__":
import doctest
doctest.testmod()
| 283
|
"""simple docstring"""
from decimal import Decimal, getcontext
from math import ceil, factorial
def pi( precision ):
    """simple docstring"""
    if not isinstance(precision ,int ):
        raise TypeError('Undefined for non-integers' )
    elif precision < 1:
        raise ValueError('Undefined for non-natural numbers' )
    getcontext().prec = precision
    num_iterations = ceil(precision / 14 )
    constant_term = 426_880 * Decimal(10_005 ).sqrt()
    exponential_term = 1
    linear_term = 13_591_409
    partial_sum = Decimal(linear_term )
    for k in range(1 ,num_iterations ):
        multinomial_term = factorial(6 * k ) // (factorial(3 * k ) * factorial(k ) ** 3)
linear_term += 545_140_134
exponential_term *= -262_537_412_640_768_000
partial_sum += Decimal(multinomial_term * linear_term ) / exponential_term
return str(constant_term / partial_sum )[:-1]
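# Chudnovsky series used above:
#   1/pi = 12 * sum_{k>=0} (-1)^k (6k)! (13591409 + 545140134 k)
#          / ((3k)! (k!)^3 * 640320^(3k + 3/2))
# Each term contributes roughly 14 digits, hence num_iterations = ceil(precision / 14);
# 426880 * sqrt(10005) equals 640320^(3/2) / 12, matching the constant_term.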
if __name__ == "__main__":
n = 50
print(f"""The first {n} digits of pi is: {pi(n)}""")
| 283
| 1
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""microsoft/trocr-base-handwritten""": (
"""https://huggingface.co/microsoft/trocr-base-handwritten/resolve/main/config.json"""
),
# See all TrOCR models at https://huggingface.co/models?filter=trocr
}
class __lowerCamelCase ( PretrainedConfig ):
    model_type = 'trocr'
    keys_to_ignore_at_inference = ['past_key_values']
    attribute_map = {
        'num_attention_heads': 'decoder_attention_heads',
        'hidden_size': 'd_model',
        'num_hidden_layers': 'decoder_layers',
    }
    def __init__( self , vocab_size=5_0265 , d_model=1024 , decoder_layers=12 , decoder_attention_heads=16 , decoder_ffn_dim=4096 , activation_function="gelu" , max_position_embeddings=512 , dropout=0.1 , attention_dropout=0.0 , activation_dropout=0.0 , decoder_start_token_id=2 , init_std=0.0_2 , decoder_layerdrop=0.0 , use_cache=True , scale_embedding=False , use_learned_position_embeddings=True , layernorm_embedding=True , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , **kwargs , ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.activation_function = activation_function
        self.max_position_embeddings = max_position_embeddings
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.init_std = init_std
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.scale_embedding = scale_embedding
        self.use_learned_position_embeddings = use_learned_position_embeddings
        self.layernorm_embedding = layernorm_embedding
        super().__init__(
            pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , decoder_start_token_id=decoder_start_token_id , **kwargs , )
| 29
|
import warnings
from ...utils import logging
from .image_processing_perceiver import PerceiverImageProcessor
lowerCamelCase__ : int = logging.get_logger(__name__)
class __magic_name__ (PerceiverImageProcessor ):
'''simple docstring'''
def __init__( self:List[Any] , *_a:Dict , **_a:Tuple ):
        warnings.warn(
            '''The class PerceiverFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'''
            ''' Please use PerceiverImageProcessor instead.''' , FutureWarning , )
super().__init__(*_a , **_a )
| 33
| 0
|
from string import ascii_lowercase, ascii_uppercase
def _snake_case (sentence):
    if not sentence:
        return ""
    lower_to_upper = dict(zip(ascii_lowercase , ascii_uppercase))
    return lower_to_upper.get(sentence[0] , sentence[0]) + sentence[1:]
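# Example: only the first character is upper-cased, so "hello world" -> "Hello world",
# while a non-alphabetic first character is left unchanged ("123 abc" -> "123 abc").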
if __name__ == "__main__":
from doctest import testmod
testmod()
| 618
|
import os
import pytest
from datasets import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
)
pytestmark = pytest.mark.integration
@pytest.mark.parametrize('path' , ['paws', 'csv'])
def test_inspect_dataset (path , tmp_path):
    inspect_dataset(path , tmp_path)
    script_name = path + '.py'
    assert script_name in os.listdir(tmp_path)
    assert "__pycache__" not in os.listdir(tmp_path)
@pytest.mark.filterwarnings('ignore:inspect_metric is deprecated:FutureWarning')
@pytest.mark.filterwarnings('ignore:metric_module_factory is deprecated:FutureWarning')
@pytest.mark.parametrize('path' , ['accuracy'])
def test_inspect_metric (path , tmp_path):
    inspect_metric(path , tmp_path)
    script_name = path + '.py'
    assert script_name in os.listdir(tmp_path)
    assert "__pycache__" not in os.listdir(tmp_path)
@pytest.mark.parametrize(
'path, config_name, expected_splits' , [
('squad', 'plain_text', ['train', 'validation']),
('dalle-mini/wit', 'dalle-mini--wit', ['train']),
('paws', 'labeled_final', ['train', 'test', 'validation']),
] , )
def test_get_dataset_config_info (path , config_name , expected_splits):
    info = get_dataset_config_info(path , config_name=config_name)
    assert info.config_name == config_name
    assert list(info.splits.keys()) == expected_splits
@pytest.mark.parametrize(
'path, config_name, expected_exception' , [
('paws', None, ValueError),
] , )
def _snake_case (__lowercase , __lowercase , __lowercase):
with pytest.raises(__lowercase):
get_dataset_config_info(__lowercase , config_name=__lowercase)
@pytest.mark.parametrize(
'path, expected' , [
('squad', 'plain_text'),
('acronym_identification', 'default'),
('lhoestq/squad', 'plain_text'),
('lhoestq/test', 'default'),
('lhoestq/demo1', 'lhoestq--demo1'),
('dalle-mini/wit', 'dalle-mini--wit'),
] , )
def _snake_case (__lowercase , __lowercase):
UpperCamelCase_ = get_dataset_config_names(__lowercase)
assert expected in config_names
@pytest.mark.parametrize(
'path, expected_configs, expected_splits_in_first_config' , [
('squad', ['plain_text'], ['train', 'validation']),
('dalle-mini/wit', ['dalle-mini--wit'], ['train']),
('paws', ['labeled_final', 'labeled_swap', 'unlabeled_final'], ['train', 'test', 'validation']),
] , )
def _snake_case (__lowercase , __lowercase , __lowercase):
UpperCamelCase_ = get_dataset_infos(__lowercase)
assert list(infos.keys()) == expected_configs
UpperCamelCase_ = expected_configs[0]
assert expected_config in infos
UpperCamelCase_ = infos[expected_config]
assert info.config_name == expected_config
assert list(info.splits.keys()) == expected_splits_in_first_config
@pytest.mark.parametrize(
'path, expected_config, expected_splits' , [
('squad', 'plain_text', ['train', 'validation']),
('dalle-mini/wit', 'dalle-mini--wit', ['train']),
('paws', 'labeled_final', ['train', 'test', 'validation']),
] , )
def _snake_case (__lowercase , __lowercase , __lowercase):
UpperCamelCase_ = get_dataset_infos(__lowercase)
assert expected_config in infos
UpperCamelCase_ = infos[expected_config]
assert info.config_name == expected_config
assert list(info.splits.keys()) == expected_splits
@pytest.mark.parametrize(
'path, config_name, expected_exception' , [
('paws', None, ValueError),
] , )
def _snake_case (__lowercase , __lowercase , __lowercase):
with pytest.raises(__lowercase):
get_dataset_split_names(__lowercase , config_name=__lowercase)
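

# Usage sketch (editor's addition): how these inspection helpers are typically
# combined outside the test suite; requires network access to the Hub.
def print_dataset_overview(path):
    for config_name in get_dataset_config_names(path):
        splits = get_dataset_split_names(path, config_name=config_name)
        print(f"{path} / {config_name}: splits = {splits}")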
| 618
| 1
|
from math import factorial
DIGIT_FACTORIAL = {str(digit): factorial(digit) for digit in range(10)}


def digit_factorial_sum(number: int) -> int:
    if not isinstance(number, int):
        raise TypeError("Parameter number must be int")

    if number < 0:
        raise ValueError("Parameter number must be greater than or equal to 0")

    # Converts number in string to iterate on its digits and adds its factorial.
    return sum(DIGIT_FACTORIAL[digit] for digit in str(number))


def solution(chain_length: int = 60, number_limit: int = 1_000_000) -> int:
    if not isinstance(chain_length, int) or not isinstance(number_limit, int):
        raise TypeError("Parameters chain_length and number_limit must be int")

    if chain_length <= 0 or number_limit <= 0:
        raise ValueError("Parameters chain_length and number_limit must be greater than 0")

    # the counter for the chains with the exact desired length
    chains_counter = 0
    # the cached sizes of the previous chains
    chain_sets_lengths: dict[int, int] = {}

    for start_chain_element in range(1, number_limit):
        # The temporary set will contain the elements of the chain
        chain_set = set()
        chain_set_length = 0

        # Stop computing the chain when you find a cached size, a repeating item or the
        # length is greater than the desired one.
        chain_element = start_chain_element
        while (
            chain_element not in chain_sets_lengths
            and chain_element not in chain_set
            and chain_set_length <= chain_length
        ):
            chain_set.add(chain_element)
            chain_set_length += 1
            chain_element = digit_factorial_sum(chain_element)

        if chain_element in chain_sets_lengths:
            chain_set_length += chain_sets_lengths[chain_element]

        chain_sets_lengths[start_chain_element] = chain_set_length

        # If chain contains the exact amount of elements increase the counter
        if chain_set_length == chain_length:
            chains_counter += 1

    return chains_counter
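

# Worked example (editor's addition): 145 is a factorion (1! + 4! + 5! = 145),
# so its chain has length 1, while 169 -> 363601 -> 1454 -> 169 is the known
# 3-cycle; both are cheap sanity checks on digit_factorial_sum.
assert digit_factorial_sum(145) == 145
assert digit_factorial_sum(169) == 363601
assert digit_factorial_sum(363601) == 1454
assert digit_factorial_sum(1454) == 169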
if __name__ == "__main__":
import doctest
doctest.testmod()
print(f'''{solution()}''')
| 66
|
import os
import unittest
from transformers import LayoutLMTokenizer, LayoutLMTokenizerFast
from transformers.models.layoutlm.tokenization_layoutlm import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class LayoutLMTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = LayoutLMTokenizer
    rust_tokenizer_class = LayoutLMTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True

    def setUp(self):
        super().setUp()

        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

    def get_tokenizer(self, **kwargs):
        return LayoutLMTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "UNwant\u00E9d,running"
        output_text = "unwanted, running"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file)

        tokens = tokenizer.tokenize("UNwant\u00E9d,running")
        self.assertListEqual(tokens, ["un", "##want", "##ed", ",", "runn", "##ing"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [7, 4, 5, 10, 8, 9])

    def test_special_tokens_as_you_like(self):
        pass
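

# Worked example (editor's addition): with the toy vocab above, WordPiece
# greedily matches the longest vocab prefix, so "UNwant\u00E9d" lowercases and
# strips the accent to "unwanted", splitting as un + ##want + ##ed
# (ids 7, 4, 5), while "running" has no whole-word entry and becomes
# runn + ##ing (ids 8, 9).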
| 66
| 1
|
"""simple docstring"""
def is_palindrome(head) -> bool:
    if not head:
        return True
    # split the list to two parts
    fast, slow = head.next, head
    while fast and fast.next:
        fast = fast.next.next
        slow = slow.next
    second = slow.next
    slow.next = None  # Don't forget here! But forget still works!
    # reverse the second part
    node = None
    while second:
        nxt = second.next
        second.next = node
        node = second
        second = nxt
    # compare two parts
    # second part has the same or one less node
    while node:
        if node.val != head.val:
            return False
        node = node.next
        head = head.next
    return True


def is_palindrome_stack(head) -> bool:
    if not head or not head.next:
        return True

    # 1. Get the midpoint (slow)
    slow = fast = cur = head
    while fast and fast.next:
        fast, slow = fast.next.next, slow.next

    # 2. Push the second half into the stack
    stack = [slow.val]
    while slow.next:
        slow = slow.next
        stack.append(slow.val)

    # 3. Comparison
    while stack:
        if stack.pop() != cur.val:
            return False
        cur = cur.next

    return True


def is_palindrome_dict(head) -> bool:
    if not head or not head.next:
        return True
    d = {}
    pos = 0
    while head:
        if head.val in d:
            d[head.val].append(pos)
        else:
            d[head.val] = [pos]
        head = head.next
        pos += 1
    checksum = pos - 1
    middle = 0
    for v in d.values():
        if len(v) % 2 != 0:
            middle += 1
        else:
            step = 0
            for i in range(0, len(v)):
                if v[i] + v[len(v) - 1 - step] != checksum:
                    return False
                step += 1
        if middle > 1:
            return False
    return True
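

# Usage sketch (editor's addition): a minimal node type plus an illustrative
# check on the list 1 -> 2 -> 1. Note that is_palindrome() splits and reverses
# its input in place, so it is called last here.
class ListNode:
    def __init__(self, val):
        self.val = val
        self.next = None


if __name__ == "__main__":
    head = ListNode(1)
    head.next = ListNode(2)
    head.next.next = ListNode(1)
    assert is_palindrome_stack(head)
    assert is_palindrome_dict(head)
    assert is_palindrome(head)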
| 702
|
"""simple docstring"""
import pickle
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, XLMRobertaTokenizer, XLMRobertaTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
@require_sentencepiece
@require_tokenizers
class XLMRobertaTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = XLMRobertaTokenizer
    rust_tokenizer_class = XLMRobertaTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = XLMRobertaTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)

    def test_convert_token_and_id(self):
        """Test ``_convert_token_to_id`` and ``_convert_id_to_token``."""
        token = "<pad>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<s>")
        self.assertEqual(vocab_keys[1], "<pad>")
        self.assertEqual(vocab_keys[-1], "<mask>")
        self.assertEqual(len(vocab_keys), 1002)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1002)
def _a ( self : int ) -> int:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = XLMRobertaTokenizer(__SCREAMING_SNAKE_CASE , keep_accents=__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = tokenizer.tokenize('''This is a test''' )
self.assertListEqual(__SCREAMING_SNAKE_CASE , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(__SCREAMING_SNAKE_CASE ) , [value + tokenizer.fairseq_offset for value in [2_85, 46, 10, 1_70, 3_82]] , )
__SCREAMING_SNAKE_CASE = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
self.assertListEqual(
__SCREAMING_SNAKE_CASE , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''é''',
'''.''',
] , )
__SCREAMING_SNAKE_CASE = tokenizer.convert_tokens_to_ids(__SCREAMING_SNAKE_CASE )
self.assertListEqual(
__SCREAMING_SNAKE_CASE , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, 2, 4]
# ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^
] , )
__SCREAMING_SNAKE_CASE = tokenizer.convert_ids_to_tokens(__SCREAMING_SNAKE_CASE )
self.assertListEqual(
__SCREAMING_SNAKE_CASE , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''<unk>''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''<unk>''',
'''.''',
] , )
def _a ( self : int ) -> Tuple:
"""simple docstring"""
if not self.test_slow_tokenizer:
# as we don't have a slow version, we can't compare the outputs between slow and fast versions
return
__SCREAMING_SNAKE_CASE = (self.rust_tokenizer_class, '''hf-internal-testing/tiny-xlm-roberta''', {})
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
__SCREAMING_SNAKE_CASE = self.rust_tokenizer_class.from_pretrained(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = self.tokenizer_class.from_pretrained(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = tempfile.mkdtemp()
__SCREAMING_SNAKE_CASE = tokenizer_r.save_pretrained(__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = tokenizer_p.save_pretrained(__SCREAMING_SNAKE_CASE )
# Checks it save with the same files + the tokenizer.json file for the fast one
self.assertTrue(any('''tokenizer.json''' in f for f in tokenizer_r_files ) )
__SCREAMING_SNAKE_CASE = tuple(f for f in tokenizer_r_files if '''tokenizer.json''' not in f )
self.assertSequenceEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
# Checks everything loads correctly in the same way
__SCREAMING_SNAKE_CASE = tokenizer_r.from_pretrained(__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = tokenizer_p.from_pretrained(__SCREAMING_SNAKE_CASE )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) )
# self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
# self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))
shutil.rmtree(__SCREAMING_SNAKE_CASE )
# Save tokenizer rust, legacy_format=True
__SCREAMING_SNAKE_CASE = tempfile.mkdtemp()
__SCREAMING_SNAKE_CASE = tokenizer_r.save_pretrained(__SCREAMING_SNAKE_CASE , legacy_format=__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = tokenizer_p.save_pretrained(__SCREAMING_SNAKE_CASE )
# Checks it save with the same files
self.assertSequenceEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
# Checks everything loads correctly in the same way
__SCREAMING_SNAKE_CASE = tokenizer_r.from_pretrained(__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = tokenizer_p.from_pretrained(__SCREAMING_SNAKE_CASE )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) )
shutil.rmtree(__SCREAMING_SNAKE_CASE )
# Save tokenizer rust, legacy_format=False
__SCREAMING_SNAKE_CASE = tempfile.mkdtemp()
__SCREAMING_SNAKE_CASE = tokenizer_r.save_pretrained(__SCREAMING_SNAKE_CASE , legacy_format=__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = tokenizer_p.save_pretrained(__SCREAMING_SNAKE_CASE )
# Checks it saved the tokenizer.json file
self.assertTrue(any('''tokenizer.json''' in f for f in tokenizer_r_files ) )
# Checks everything loads correctly in the same way
__SCREAMING_SNAKE_CASE = tokenizer_r.from_pretrained(__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = tokenizer_p.from_pretrained(__SCREAMING_SNAKE_CASE )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) )
shutil.rmtree(__SCREAMING_SNAKE_CASE )
    @cached_property
    def big_tokenizer(self):
        return XLMRobertaTokenizer.from_pretrained("xlm-roberta-base")
def _a ( self : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
with tempfile.NamedTemporaryFile() as f:
shutil.copyfile(__SCREAMING_SNAKE_CASE , f.name )
__SCREAMING_SNAKE_CASE = XLMRobertaTokenizer(f.name , keep_accents=__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = pickle.dumps(__SCREAMING_SNAKE_CASE )
pickle.loads(__SCREAMING_SNAKE_CASE )
def _a ( self : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
if not self.test_rust_tokenizer:
return
__SCREAMING_SNAKE_CASE = self.get_tokenizer()
__SCREAMING_SNAKE_CASE = self.get_rust_tokenizer()
__SCREAMING_SNAKE_CASE = '''I was born in 92000, and this is falsé.'''
__SCREAMING_SNAKE_CASE = tokenizer.tokenize(__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = rust_tokenizer.tokenize(__SCREAMING_SNAKE_CASE )
self.assertListEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = tokenizer.encode(__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = rust_tokenizer.encode(__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE )
self.assertListEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = self.get_rust_tokenizer()
__SCREAMING_SNAKE_CASE = tokenizer.encode(__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = rust_tokenizer.encode(__SCREAMING_SNAKE_CASE )
self.assertListEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
@slow
def _a ( self : Any ) -> Dict:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = '''Hello World!'''
__SCREAMING_SNAKE_CASE = [0, 3_53_78, 66_61, 38, 2]
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.large has same tokenizer
# xlmr.eval()
# xlmr.encode(symbols)
self.assertListEqual(__SCREAMING_SNAKE_CASE , self.big_tokenizer.encode(__SCREAMING_SNAKE_CASE ) )
@slow
def _a ( self : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = (
'''This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'''
''' add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth'''
)
__SCREAMING_SNAKE_CASE = [
0,
32_93,
83,
10,
45_52,
49_89,
79_86,
6_78,
10,
59_15,
1_11,
17_94_59,
12_48_50,
4,
60_44,
2_37,
12,
6,
5,
6,
4,
67_80,
7_05,
15,
13_88,
44,
3_78,
1_01_14,
7_11,
1_52,
20,
6,
5,
2_23_76,
6_42,
12_21,
1_51_90,
3_41_53,
4_50,
56_08,
9_59,
11_19,
5_77_02,
1_36,
1_86,
47,
10_98,
2_93_67,
47,
# 4426, # What fairseq tokenizes from "<unk>": "_<"
# 3678, # What fairseq tokenizes from "<unk>": "unk"
# 2740, # What fairseq tokenizes from "<unk>": ">"
3, # What we tokenize from "<unk>": "<unk>"
6, # Residue from the tokenization: an extra sentencepiece underline
4,
60_44,
2_37,
62_84,
5_09_01,
5_28,
31,
90,
34,
9_27,
2,
]
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.large has same tokenizer
# xlmr.eval()
# xlmr.encode(symbols)
self.assertListEqual(__SCREAMING_SNAKE_CASE , self.big_tokenizer.encode(__SCREAMING_SNAKE_CASE ) )
@slow
def _a ( self : Optional[int] ) -> Dict:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = {'''input_ids''': [[0, 1_10_62, 8_27_72, 7, 15, 8_27_72, 5_38, 5_15_29, 2_37, 1_71_98, 12_90, 2_06, 9, 21_51_75, 13_14, 1_36, 1_71_98, 12_90, 2_06, 9, 5_63_59, 42, 12_20_09, 9, 1_64_66, 16, 8_73_44, 45_37, 9, 47_17, 7_83_81, 6, 15_99_58, 7, 15, 2_44_80, 6_18, 4, 5_27, 2_26_93, 54_28, 4, 27_77, 2_44_80, 98_74, 4, 4_35_23, 5_94, 4, 8_03, 1_83_92, 3_31_89, 18, 4, 4_35_23, 2_44_47, 1_23_99, 1_00, 2_49_55, 8_36_58, 96_26, 14_40_57, 15, 8_39, 2_23_35, 16, 1_36, 2_49_55, 8_36_58, 8_34_79, 15, 3_91_02, 7_24, 16, 6_78, 6_45, 27_89, 13_28, 45_89, 42, 12_20_09, 11_57_74, 23, 8_05, 13_28, 4_68_76, 7, 1_36, 5_38_94, 19_40, 4_22_27, 4_11_59, 1_77_21, 8_23, 4_25, 4, 2_75_12, 9_87_22, 2_06, 1_36, 55_31, 49_70, 9_19, 1_73_36, 5, 2], [0, 2_00_80, 6_18, 83, 8_27_75, 47, 4_79, 9, 15_17, 73, 5_38_94, 3_33, 8_05_81, 11_01_17, 1_88_11, 52_56, 12_95, 51, 15_25_26, 2_97, 79_86, 3_90, 12_44_16, 5_38, 3_54_31, 2_14, 98, 1_50_44, 2_57_37, 1_36, 71_08, 4_37_01, 23, 7_56, 13_53_55, 7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 5_81, 6_37_73, 11_94_55, 6, 14_77_97, 8_82_03, 7, 6_45, 70, 21, 32_85, 1_02_69, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=__SCREAMING_SNAKE_CASE , model_name='''xlm-roberta-base''' , revision='''d9d8a8ea5eb94b1c6654ae9249df7793cd2933d3''' , )
| 690
| 0
|
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class Blip2Processor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "BlipImageProcessor"
    tokenizer_class = "AutoTokenizer"

    def __init__(self, image_processor, tokenizer):
        tokenizer.return_token_type_ids = False
        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor

    def __call__(self, images: ImageInput = None, text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None, add_special_tokens: bool = True, padding: Union[bool, str, PaddingStrategy] = False, truncation: Union[bool, str, TruncationStrategy] = None, max_length: Optional[int] = None, stride: int = 0, pad_to_multiple_of: Optional[int] = None, return_attention_mask: Optional[bool] = None, return_overflowing_tokens: bool = False, return_special_tokens_mask: bool = False, return_offsets_mapping: bool = False, return_token_type_ids: bool = False, return_length: bool = False, verbose: bool = True, return_tensors: Optional[Union[str, TensorType]] = None, **kwargs) -> BatchEncoding:
        if images is None and text is None:
            raise ValueError("You have to specify either images or text.")

        # Get only text
        if images is None:
            self.current_processor = self.tokenizer
            text_encoding = self.tokenizer(
                text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs,
            )
            return text_encoding

        # add pixel_values
        encoding_image_processor = self.image_processor(images, return_tensors=return_tensors)

        if text is not None:
            text_encoding = self.tokenizer(
                text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs,
            )
        else:
            text_encoding = None

        if text_encoding is not None:
            encoding_image_processor.update(text_encoding)

        return encoding_image_processor

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    # Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
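

# Usage sketch (editor's addition): a typical call pattern for this processor.
# The checkpoint id is illustrative and the call requires a download, so it is
# left commented out.
#
#     from transformers import Blip2Processor
#     processor = Blip2Processor.from_pretrained("Salesforce/blip2-opt-2.7b")
#     inputs = processor(images=image, text="Question: what is shown? Answer:", return_tensors="pt")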
| 85
|
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import BertTokenizer, BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import ChineseCLIPImageProcessor, ChineseCLIPProcessor
@require_vision
class ChineseCLIPProcessorTest( unittest.TestCase ):
"""simple docstring"""
def __lowerCamelCase ( self ) -> List[str]:
'''simple docstring'''
__UpperCamelCase : List[str] = tempfile.mkdtemp()
__UpperCamelCase : Tuple = [
"[UNK]",
"[CLS]",
"[SEP]",
"[PAD]",
"[MASK]",
"的",
"价",
"格",
"是",
"15",
"便",
"alex",
"##andra",
",",
"。",
"-",
"t",
"shirt",
]
__UpperCamelCase : Dict = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )
__UpperCamelCase : Optional[Any] = {
"do_resize": True,
"size": {"height": 2_24, "width": 2_24},
"do_center_crop": True,
"crop_size": {"height": 18, "width": 18},
"do_normalize": True,
"image_mean": [0.48145466, 0.4578275, 0.40821073],
"image_std": [0.26862954, 0.26130258, 0.27577711],
"do_convert_rgb": True,
}
__UpperCamelCase : Union[str, Any] = os.path.join(self.tmpdirname , __UpperCamelCase )
with open(self.image_processor_file , "w" , encoding="utf-8" ) as fp:
json.dump(__UpperCamelCase , __UpperCamelCase )
def __lowerCamelCase ( self , **__UpperCamelCase ) -> Dict:
'''simple docstring'''
return BertTokenizer.from_pretrained(self.tmpdirname , **__UpperCamelCase )
def __lowerCamelCase ( self , **__UpperCamelCase ) -> Any:
'''simple docstring'''
return BertTokenizerFast.from_pretrained(self.tmpdirname , **__UpperCamelCase )
def __lowerCamelCase ( self , **__UpperCamelCase ) -> Dict:
'''simple docstring'''
return ChineseCLIPImageProcessor.from_pretrained(self.tmpdirname , **__UpperCamelCase )
def __lowerCamelCase ( self ) -> str:
'''simple docstring'''
shutil.rmtree(self.tmpdirname )
def __lowerCamelCase ( self ) -> Tuple:
'''simple docstring'''
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs
def __lowerCamelCase ( self ) -> Tuple:
'''simple docstring'''
__UpperCamelCase : str = self.get_tokenizer()
__UpperCamelCase : Union[str, Any] = self.get_rust_tokenizer()
__UpperCamelCase : Any = self.get_image_processor()
__UpperCamelCase : str = ChineseCLIPProcessor(tokenizer=__UpperCamelCase , image_processor=__UpperCamelCase )
processor_slow.save_pretrained(self.tmpdirname )
__UpperCamelCase : Optional[Any] = ChineseCLIPProcessor.from_pretrained(self.tmpdirname , use_fast=__UpperCamelCase )
__UpperCamelCase : Union[str, Any] = ChineseCLIPProcessor(tokenizer=__UpperCamelCase , image_processor=__UpperCamelCase )
processor_fast.save_pretrained(self.tmpdirname )
__UpperCamelCase : Tuple = ChineseCLIPProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , __UpperCamelCase )
self.assertIsInstance(processor_fast.tokenizer , __UpperCamelCase )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , __UpperCamelCase )
self.assertIsInstance(processor_fast.image_processor , __UpperCamelCase )
def __lowerCamelCase ( self ) -> List[Any]:
'''simple docstring'''
__UpperCamelCase : Any = ChineseCLIPProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
__UpperCamelCase : Optional[Any] = self.get_tokenizer(cls_token="(CLS)" , sep_token="(SEP)" )
__UpperCamelCase : Tuple = self.get_image_processor(do_normalize=__UpperCamelCase )
__UpperCamelCase : List[Any] = ChineseCLIPProcessor.from_pretrained(
self.tmpdirname , cls_token="(CLS)" , sep_token="(SEP)" , do_normalize=__UpperCamelCase )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , __UpperCamelCase )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , __UpperCamelCase )
def __lowerCamelCase ( self ) -> str:
'''simple docstring'''
__UpperCamelCase : List[str] = self.get_image_processor()
__UpperCamelCase : List[str] = self.get_tokenizer()
__UpperCamelCase : Tuple = ChineseCLIPProcessor(tokenizer=__UpperCamelCase , image_processor=__UpperCamelCase )
__UpperCamelCase : Optional[Any] = self.prepare_image_inputs()
__UpperCamelCase : List[str] = image_processor(__UpperCamelCase , return_tensors="np" )
__UpperCamelCase : List[Any] = processor(images=__UpperCamelCase , return_tensors="np" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
def __lowerCamelCase ( self ) -> List[str]:
'''simple docstring'''
__UpperCamelCase : Union[str, Any] = self.get_image_processor()
__UpperCamelCase : Union[str, Any] = self.get_tokenizer()
__UpperCamelCase : int = ChineseCLIPProcessor(tokenizer=__UpperCamelCase , image_processor=__UpperCamelCase )
__UpperCamelCase : int = "Alexandra,T-shirt的价格是15便士。"
__UpperCamelCase : int = processor(text=__UpperCamelCase )
__UpperCamelCase : int = tokenizer(__UpperCamelCase )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def __lowerCamelCase ( self ) -> Union[str, Any]:
'''simple docstring'''
__UpperCamelCase : List[str] = self.get_image_processor()
__UpperCamelCase : List[str] = self.get_tokenizer()
__UpperCamelCase : Optional[int] = ChineseCLIPProcessor(tokenizer=__UpperCamelCase , image_processor=__UpperCamelCase )
__UpperCamelCase : str = "Alexandra,T-shirt的价格是15便士。"
__UpperCamelCase : List[Any] = self.prepare_image_inputs()
__UpperCamelCase : Union[str, Any] = processor(text=__UpperCamelCase , images=__UpperCamelCase )
self.assertListEqual(list(inputs.keys() ) , ["input_ids", "token_type_ids", "attention_mask", "pixel_values"] )
# test if it raises when no input is passed
with pytest.raises(__UpperCamelCase ):
processor()
def __lowerCamelCase ( self ) -> Dict:
'''simple docstring'''
__UpperCamelCase : Tuple = self.get_image_processor()
__UpperCamelCase : Any = self.get_tokenizer()
__UpperCamelCase : Dict = ChineseCLIPProcessor(tokenizer=__UpperCamelCase , image_processor=__UpperCamelCase )
__UpperCamelCase : int = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
__UpperCamelCase : str = processor.batch_decode(__UpperCamelCase )
__UpperCamelCase : Dict = tokenizer.batch_decode(__UpperCamelCase )
self.assertListEqual(__UpperCamelCase , __UpperCamelCase )
def __lowerCamelCase ( self ) -> int:
'''simple docstring'''
__UpperCamelCase : Optional[int] = self.get_image_processor()
__UpperCamelCase : Tuple = self.get_tokenizer()
__UpperCamelCase : Dict = ChineseCLIPProcessor(tokenizer=__UpperCamelCase , image_processor=__UpperCamelCase )
__UpperCamelCase : Tuple = "Alexandra,T-shirt的价格是15便士。"
__UpperCamelCase : Optional[int] = self.prepare_image_inputs()
__UpperCamelCase : Tuple = processor(text=__UpperCamelCase , images=__UpperCamelCase )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
| 327
| 0
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_torch_available,
)
_import_structure = {
'''configuration_speecht5''': [
'''SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP''',
'''SpeechT5Config''',
'''SpeechT5HifiGanConfig''',
],
'''feature_extraction_speecht5''': ['''SpeechT5FeatureExtractor'''],
'''processing_speecht5''': ['''SpeechT5Processor'''],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_speecht5"] = ["SpeechT5Tokenizer"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_speecht5"] = [
'''SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''SpeechT5ForSpeechToText''',
'''SpeechT5ForSpeechToSpeech''',
'''SpeechT5ForTextToSpeech''',
'''SpeechT5Model''',
'''SpeechT5PreTrainedModel''',
'''SpeechT5HifiGan''',
]
if TYPE_CHECKING:
    from .configuration_speecht5 import (
        SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP,
        SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP,
        SpeechT5Config,
        SpeechT5HifiGanConfig,
    )
    from .feature_extraction_speecht5 import SpeechT5FeatureExtractor
    from .processing_speecht5 import SpeechT5Processor

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_speecht5 import SpeechT5Tokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_speecht5 import (
            SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST,
            SpeechT5ForSpeechToSpeech,
            SpeechT5ForSpeechToText,
            SpeechT5ForTextToSpeech,
            SpeechT5HifiGan,
            SpeechT5Model,
            SpeechT5PreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
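
# (editor's addition) What the lazy init buys, illustratively: importing the
# package is cheap, and heavy submodules load only on first attribute access.
#
#     import transformers.models.speecht5 as speecht5  # no torch import yet
#     _ = speecht5.SpeechT5Config  # configuration_speecht5 is imported here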
| 330
|
'''simple docstring'''
import importlib
import json
import os
from collections import OrderedDict
from typing import Dict, Optional, Union
# Build the list of all feature extractors
from ...configuration_utils import PretrainedConfig
from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code
from ...feature_extraction_utils import FeatureExtractionMixin
from ...utils import CONFIG_NAME, FEATURE_EXTRACTOR_NAME, get_file_from_repo, logging
from .auto_factory import _LazyAutoMapping
from .configuration_auto import (
CONFIG_MAPPING_NAMES,
AutoConfig,
model_type_to_module_name,
replace_list_option_in_docstrings,
)
logger = logging.get_logger(__name__)

FEATURE_EXTRACTOR_MAPPING_NAMES = OrderedDict(
[
('''audio-spectrogram-transformer''', '''ASTFeatureExtractor'''),
('''beit''', '''BeitFeatureExtractor'''),
('''chinese_clip''', '''ChineseCLIPFeatureExtractor'''),
('''clap''', '''ClapFeatureExtractor'''),
('''clip''', '''CLIPFeatureExtractor'''),
('''clipseg''', '''ViTFeatureExtractor'''),
('''conditional_detr''', '''ConditionalDetrFeatureExtractor'''),
('''convnext''', '''ConvNextFeatureExtractor'''),
('''cvt''', '''ConvNextFeatureExtractor'''),
('''data2vec-audio''', '''Wav2Vec2FeatureExtractor'''),
('''data2vec-vision''', '''BeitFeatureExtractor'''),
('''deformable_detr''', '''DeformableDetrFeatureExtractor'''),
('''deit''', '''DeiTFeatureExtractor'''),
('''detr''', '''DetrFeatureExtractor'''),
('''dinat''', '''ViTFeatureExtractor'''),
('''donut-swin''', '''DonutFeatureExtractor'''),
('''dpt''', '''DPTFeatureExtractor'''),
('''encodec''', '''EncodecFeatureExtractor'''),
('''flava''', '''FlavaFeatureExtractor'''),
('''glpn''', '''GLPNFeatureExtractor'''),
('''groupvit''', '''CLIPFeatureExtractor'''),
('''hubert''', '''Wav2Vec2FeatureExtractor'''),
('''imagegpt''', '''ImageGPTFeatureExtractor'''),
('''layoutlmv2''', '''LayoutLMv2FeatureExtractor'''),
('''layoutlmv3''', '''LayoutLMv3FeatureExtractor'''),
('''levit''', '''LevitFeatureExtractor'''),
('''maskformer''', '''MaskFormerFeatureExtractor'''),
('''mctct''', '''MCTCTFeatureExtractor'''),
('''mobilenet_v1''', '''MobileNetV1FeatureExtractor'''),
('''mobilenet_v2''', '''MobileNetV2FeatureExtractor'''),
('''mobilevit''', '''MobileViTFeatureExtractor'''),
('''nat''', '''ViTFeatureExtractor'''),
('''owlvit''', '''OwlViTFeatureExtractor'''),
('''perceiver''', '''PerceiverFeatureExtractor'''),
('''poolformer''', '''PoolFormerFeatureExtractor'''),
('''regnet''', '''ConvNextFeatureExtractor'''),
('''resnet''', '''ConvNextFeatureExtractor'''),
('''segformer''', '''SegformerFeatureExtractor'''),
('''sew''', '''Wav2Vec2FeatureExtractor'''),
('''sew-d''', '''Wav2Vec2FeatureExtractor'''),
('''speech_to_text''', '''Speech2TextFeatureExtractor'''),
('''speecht5''', '''SpeechT5FeatureExtractor'''),
('''swiftformer''', '''ViTFeatureExtractor'''),
('''swin''', '''ViTFeatureExtractor'''),
('''swinv2''', '''ViTFeatureExtractor'''),
('''table-transformer''', '''DetrFeatureExtractor'''),
('''timesformer''', '''VideoMAEFeatureExtractor'''),
('''tvlt''', '''TvltFeatureExtractor'''),
('''unispeech''', '''Wav2Vec2FeatureExtractor'''),
('''unispeech-sat''', '''Wav2Vec2FeatureExtractor'''),
('''van''', '''ConvNextFeatureExtractor'''),
('''videomae''', '''VideoMAEFeatureExtractor'''),
('''vilt''', '''ViltFeatureExtractor'''),
('''vit''', '''ViTFeatureExtractor'''),
('''vit_mae''', '''ViTFeatureExtractor'''),
('''vit_msn''', '''ViTFeatureExtractor'''),
('''wav2vec2''', '''Wav2Vec2FeatureExtractor'''),
('''wav2vec2-conformer''', '''Wav2Vec2FeatureExtractor'''),
('''wavlm''', '''Wav2Vec2FeatureExtractor'''),
('''whisper''', '''WhisperFeatureExtractor'''),
('''xclip''', '''CLIPFeatureExtractor'''),
('''yolos''', '''YolosFeatureExtractor'''),
]
)
FEATURE_EXTRACTOR_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FEATURE_EXTRACTOR_MAPPING_NAMES)
def feature_extractor_class_from_name(class_name: str):
    for module_name, extractors in FEATURE_EXTRACTOR_MAPPING_NAMES.items():
        if class_name in extractors:
            module_name = model_type_to_module_name(module_name)

            module = importlib.import_module(f".{module_name}", "transformers.models")
            try:
                return getattr(module, class_name)
            except AttributeError:
                continue

    for _, extractor in FEATURE_EXTRACTOR_MAPPING._extra_content.items():
        if getattr(extractor, "__name__", None) == class_name:
            return extractor

    # We did not find the class, but maybe it's because a dep is missing. In that case, the class will be in the main
    # init and we return the proper dummy to get an appropriate error message.
    main_module = importlib.import_module("transformers")
    if hasattr(main_module, class_name):
        return getattr(main_module, class_name)

    return None
def get_feature_extractor_config(pretrained_model_name_or_path: Union[str, os.PathLike], cache_dir: Optional[Union[str, os.PathLike]] = None, force_download: bool = False, resume_download: bool = False, proxies: Optional[Dict[str, str]] = None, use_auth_token: Optional[Union[bool, str]] = None, revision: Optional[str] = None, local_files_only: bool = False, **kwargs) -> dict:
    resolved_config_file = get_file_from_repo(
        pretrained_model_name_or_path, FEATURE_EXTRACTOR_NAME, cache_dir=cache_dir, force_download=force_download, resume_download=resume_download, proxies=proxies, use_auth_token=use_auth_token, revision=revision, local_files_only=local_files_only,
    )
    if resolved_config_file is None:
        logger.info(
            "Could not locate the feature extractor configuration file, will try to use the model config instead."
        )
        return {}

    with open(resolved_config_file, encoding="utf-8") as reader:
        return json.load(reader)
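

# Usage sketch (editor's addition): fetching a repo's preprocessor config as a
# plain dict; the model id is illustrative and the call needs network access.
#
#     feature_extractor_dict = get_feature_extractor_config("facebook/wav2vec2-base-960h")
#     print(feature_extractor_dict.get("feature_extractor_type"))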
class AutoFeatureExtractor:
    def __init__(self):
        raise EnvironmentError(
            "AutoFeatureExtractor is designed to be instantiated "
            "using the `AutoFeatureExtractor.from_pretrained(pretrained_model_name_or_path)` method."
        )

    @classmethod
    @replace_list_option_in_docstrings(FEATURE_EXTRACTOR_MAPPING_NAMES)
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        config = kwargs.pop("config", None)
        trust_remote_code = kwargs.pop("trust_remote_code", None)
        kwargs["_from_auto"] = True

        config_dict, _ = FeatureExtractionMixin.get_feature_extractor_dict(pretrained_model_name_or_path, **kwargs)
        feature_extractor_class = config_dict.get("feature_extractor_type", None)
        feature_extractor_auto_map = None
        if "AutoFeatureExtractor" in config_dict.get("auto_map", {}):
            feature_extractor_auto_map = config_dict["auto_map"]["AutoFeatureExtractor"]

        # If we don't find the feature extractor class in the feature extractor config, let's try the model config.
        if feature_extractor_class is None and feature_extractor_auto_map is None:
            if not isinstance(config, PretrainedConfig):
                config = AutoConfig.from_pretrained(pretrained_model_name_or_path, **kwargs)
            # It could be in `config.feature_extractor_type``
            feature_extractor_class = getattr(config, "feature_extractor_type", None)
            if hasattr(config, "auto_map") and "AutoFeatureExtractor" in config.auto_map:
                feature_extractor_auto_map = config.auto_map["AutoFeatureExtractor"]

        if feature_extractor_class is not None:
            feature_extractor_class = feature_extractor_class_from_name(feature_extractor_class)

        has_remote_code = feature_extractor_auto_map is not None
        has_local_code = feature_extractor_class is not None or type(config) in FEATURE_EXTRACTOR_MAPPING
        trust_remote_code = resolve_trust_remote_code(
            trust_remote_code, pretrained_model_name_or_path, has_local_code, has_remote_code
        )

        if has_remote_code and trust_remote_code:
            feature_extractor_class = get_class_from_dynamic_module(
                feature_extractor_auto_map, pretrained_model_name_or_path, **kwargs
            )
            _ = kwargs.pop("code_revision", None)
            if os.path.isdir(pretrained_model_name_or_path):
                feature_extractor_class.register_for_auto_class()
            return feature_extractor_class.from_dict(config_dict, **kwargs)
        elif feature_extractor_class is not None:
            return feature_extractor_class.from_dict(config_dict, **kwargs)
        # Last try: we use the FEATURE_EXTRACTOR_MAPPING.
        elif type(config) in FEATURE_EXTRACTOR_MAPPING:
            feature_extractor_class = FEATURE_EXTRACTOR_MAPPING[type(config)]
            return feature_extractor_class.from_dict(config_dict, **kwargs)

        raise ValueError(
            f"Unrecognized feature extractor in {pretrained_model_name_or_path}. Should have a "
            f"`feature_extractor_type` key in its {FEATURE_EXTRACTOR_NAME} of {CONFIG_NAME}, or one of the following "
            f"`model_type` keys in its {CONFIG_NAME}: {', '.join(c for c in FEATURE_EXTRACTOR_MAPPING_NAMES.keys())}"
        )

    @staticmethod
    def register(config_class, feature_extractor_class):
        """Register a new feature extractor for this class."""
        FEATURE_EXTRACTOR_MAPPING.register(config_class, feature_extractor_class)
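

# Usage sketch (editor's addition): wiring a custom feature extractor into the
# auto class; `MyConfig` and `MyFeatureExtractor` are hypothetical names.
#
#     AutoFeatureExtractor.register(MyConfig, MyFeatureExtractor)
#     extractor = AutoFeatureExtractor.from_pretrained("path/to/my-model")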
| 330
| 1
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_UpperCamelCase = logging.get_logger(__name__)
_UpperCamelCase = {
"google/realm-cc-news-pretrained-embedder": (
"https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/config.json"
),
"google/realm-cc-news-pretrained-encoder": (
"https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/config.json"
),
"google/realm-cc-news-pretrained-scorer": (
"https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/config.json"
),
"google/realm-cc-news-pretrained-openqa": (
"https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/config.json"
),
"google/realm-orqa-nq-openqa": "https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/config.json",
"google/realm-orqa-nq-reader": "https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/config.json",
"google/realm-orqa-wq-openqa": "https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/config.json",
"google/realm-orqa-wq-reader": "https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/config.json",
# See all REALM models at https://huggingface.co/models?filter=realm
}
class RealmConfig(PretrainedConfig):
    model_type = "realm"

    def __init__(self, vocab_size=30_522, hidden_size=768, retriever_proj_size=128, num_hidden_layers=12, num_attention_heads=12, num_candidates=8, intermediate_size=3_072, hidden_act="gelu_new", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, span_hidden_size=256, max_span_width=10, reader_layer_norm_eps=1e-3, reader_beam_size=5, reader_seq_len=320, num_block_records=13_353_718, searcher_beam_size=5_000, pad_token_id=1, bos_token_id=0, eos_token_id=2, **kwargs):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        # Common config
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.retriever_proj_size = retriever_proj_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_candidates = num_candidates
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps

        # Reader config
        self.span_hidden_size = span_hidden_size
        self.max_span_width = max_span_width
        self.reader_layer_norm_eps = reader_layer_norm_eps
        self.reader_beam_size = reader_beam_size
        self.reader_seq_len = reader_seq_len

        # Retrieval config
        self.num_block_records = num_block_records
        self.searcher_beam_size = searcher_beam_size
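

# Usage sketch (editor's addition): the config is a plain container, so it can
# be built and inspected without downloading weights; overrides illustrative.
#
#     config = RealmConfig(num_candidates=4, reader_beam_size=3)
#     assert config.num_candidates == 4 and config.reader_beam_size == 3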
| 363
|
'''simple docstring'''
import warnings
from typing import List
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import is_flax_available, is_tf_available, is_torch_available
class OwlViTProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "OwlViTImageProcessor"
    tokenizer_class = ("CLIPTokenizer", "CLIPTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, query_images=None, images=None, padding="max_length", return_tensors="np", **kwargs):
        if text is None and query_images is None and images is None:
            raise ValueError(
                "You have to specify at least one text or query image or image. All three cannot be none."
            )

        if text is not None:
            if isinstance(text, str) or (isinstance(text, List) and not isinstance(text[0], List)):
                encodings = [self.tokenizer(text, padding=padding, return_tensors=return_tensors, **kwargs)]

            elif isinstance(text, List) and isinstance(text[0], List):
                encodings = []

                # Maximum number of queries across batch
                max_num_queries = max([len(t) for t in text])

                # Pad all batch samples to max number of text queries
                for t in text:
                    if len(t) != max_num_queries:
                        t = t + [" "] * (max_num_queries - len(t))

                    encoding = self.tokenizer(t, padding=padding, return_tensors=return_tensors, **kwargs)
                    encodings.append(encoding)
            else:
                raise TypeError("Input text should be a string, a list of strings or a nested list of strings")

            if return_tensors == "np":
                input_ids = np.concatenate([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = np.concatenate([encoding["attention_mask"] for encoding in encodings], axis=0)

            elif return_tensors == "jax" and is_flax_available():
                import jax.numpy as jnp

                input_ids = jnp.concatenate([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = jnp.concatenate([encoding["attention_mask"] for encoding in encodings], axis=0)

            elif return_tensors == "pt" and is_torch_available():
                import torch

                input_ids = torch.cat([encoding["input_ids"] for encoding in encodings], dim=0)
                attention_mask = torch.cat([encoding["attention_mask"] for encoding in encodings], dim=0)

            elif return_tensors == "tf" and is_tf_available():
                import tensorflow as tf

                input_ids = tf.stack([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = tf.stack([encoding["attention_mask"] for encoding in encodings], axis=0)

            else:
                raise ValueError("Target return tensor type could not be returned")

            encoding = BatchEncoding()
            encoding["input_ids"] = input_ids
            encoding["attention_mask"] = attention_mask

        if query_images is not None:
            encoding = BatchEncoding()
            query_pixel_values = self.image_processor(query_images, return_tensors=return_tensors, **kwargs).pixel_values
            encoding["query_pixel_values"] = query_pixel_values

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif query_images is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None or query_images is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def post_process(self, *args, **kwargs):
        return self.image_processor.post_process(*args, **kwargs)

    def post_process_object_detection(self, *args, **kwargs):
        return self.image_processor.post_process_object_detection(*args, **kwargs)

    def post_process_image_guided_detection(self, *args, **kwargs):
        return self.image_processor.post_process_image_guided_detection(*args, **kwargs)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
| 467
| 0
|
"""simple docstring"""
import argparse
import torch
from transformers import (
    Wav2Vec2Config,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2ForAudioFrameClassification,
    Wav2Vec2ForSequenceClassification,
    Wav2Vec2ForXVector,
    logging,
)


logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def convert_classification(base_model_name, hf_config, downstream_dict):
    model = Wav2Vec2ForSequenceClassification.from_pretrained(base_model_name, config=hf_config)
    model.projector.weight.data = downstream_dict["projector.weight"]
    model.projector.bias.data = downstream_dict["projector.bias"]
    model.classifier.weight.data = downstream_dict["model.post_net.linear.weight"]
    model.classifier.bias.data = downstream_dict["model.post_net.linear.bias"]
    return model


def convert_diarization(base_model_name, hf_config, downstream_dict):
    model = Wav2Vec2ForAudioFrameClassification.from_pretrained(base_model_name, config=hf_config)
    model.classifier.weight.data = downstream_dict["model.linear.weight"]
    model.classifier.bias.data = downstream_dict["model.linear.bias"]
    return model


def convert_xvector(base_model_name, hf_config, downstream_dict):
    model = Wav2Vec2ForXVector.from_pretrained(base_model_name, config=hf_config)
    model.projector.weight.data = downstream_dict["connector.weight"]
    model.projector.bias.data = downstream_dict["connector.bias"]
    for i, kernel_size in enumerate(hf_config.tdnn_kernel):
        model.tdnn[i].kernel.weight.data = downstream_dict[
            f"model.framelevel_feature_extractor.module.{i}.kernel.weight"
        ]
        model.tdnn[i].kernel.bias.data = downstream_dict[f"model.framelevel_feature_extractor.module.{i}.kernel.bias"]

    model.feature_extractor.weight.data = downstream_dict["model.utterancelevel_feature_extractor.linear1.weight"]
    model.feature_extractor.bias.data = downstream_dict["model.utterancelevel_feature_extractor.linear1.bias"]
    model.classifier.weight.data = downstream_dict["model.utterancelevel_feature_extractor.linear2.weight"]
    model.classifier.bias.data = downstream_dict["model.utterancelevel_feature_extractor.linear2.bias"]
    model.objective.weight.data = downstream_dict["objective.W"]
    return model


@torch.no_grad()
def convert_s3prl_checkpoint(base_model_name, config_path, checkpoint_path, model_dump_path):
    checkpoint = torch.load(checkpoint_path, map_location="cpu")
    downstream_dict = checkpoint["Downstream"]

    hf_config = Wav2Vec2Config.from_pretrained(config_path)
    hf_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(
        base_model_name, return_attention_mask=True, do_normalize=False
    )

    arch = hf_config.architectures[0]
    if arch.endswith("ForSequenceClassification"):
        hf_model = convert_classification(base_model_name, hf_config, downstream_dict)
    elif arch.endswith("ForAudioFrameClassification"):
        hf_model = convert_diarization(base_model_name, hf_config, downstream_dict)
    elif arch.endswith("ForXVector"):
        hf_model = convert_xvector(base_model_name, hf_config, downstream_dict)
    else:
        raise NotImplementedError(f"S3PRL weights conversion is not supported for {arch}")

    if hf_config.use_weighted_layer_sum:
        hf_model.layer_weights.data = checkpoint["Featurizer"]["weights"]

    hf_feature_extractor.save_pretrained(model_dump_path)
    hf_model.save_pretrained(model_dump_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--base_model_name", default=None, type=str, help="Name of the huggingface pretrained base model."
    )
    parser.add_argument("--config_path", default=None, type=str, help="Path to the huggingface classifier config.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to the s3prl checkpoint.")
    parser.add_argument("--model_dump_path", default=None, type=str, help="Path to the final converted model.")
    args = parser.parse_args()
    convert_s3prl_checkpoint(args.base_model_name, args.config_path, args.checkpoint_path, args.model_dump_path)
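
# CLI sketch (editor's addition): an illustrative invocation; every path and
# the base model name below are placeholders.
#
#     python convert_s3prl_checkpoint.py \
#         --base_model_name facebook/wav2vec2-base \
#         --config_path ./config.json \
#         --checkpoint_path ./s3prl_checkpoint.ckpt \
#         --model_dump_path ./converted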
| 439
|
"""simple docstring"""
def greatest_common_divisor(x: int, y: int) -> int:
    """Euclidean algorithm: gcd(x, y) = gcd(y, x mod y)."""
    return x if y == 0 else greatest_common_divisor(y, x % y)


def lcm(x: int, y: int) -> int:
    """Least common multiple via the gcd identity: lcm(x, y) = x * y / gcd(x, y)."""
    return (x * y) // greatest_common_divisor(x, y)


def solution(n: int = 20) -> int:
    """Project Euler 5: smallest positive number evenly divisible by 1..n."""
    g = 1
    for i in range(1, n + 1):
        g = lcm(g, i)
    return g
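

# Worked example (editor's addition): 2520 is the smallest number divisible by
# every integer from 1 to 10, which the lcm fold above reproduces.
assert solution(10) == 2520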
if __name__ == "__main__":
print(F"{solution() = }")
| 439
| 1
|
def molarity_to_normality(nfactor: int, moles: float, volume: float) -> float:
    """Normality = molarity * n-factor, where molarity = moles / volume."""
    return round(float(moles / volume) * nfactor)


def moles_to_pressure(volume: float, moles: float, temperature: float) -> float:
    """Ideal gas law solved for pressure: P = nRT / V, with R = 0.0821 L*atm/(mol*K)."""
    return round(float((moles * 0.0821 * temperature) / (volume)))


def moles_to_volume(pressure: float, moles: float, temperature: float) -> float:
    """Ideal gas law solved for volume: V = nRT / P."""
    return round(float((moles * 0.0821 * temperature) / (pressure)))


def pressure_and_volume_to_temperature(pressure: float, moles: float, volume: float) -> float:
    """Ideal gas law solved for temperature: T = PV / (nR)."""
    return round(float((pressure * volume) / (0.0821 * moles)))
if __name__ == "__main__":
import doctest
doctest.testmod()
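    # Editor's note: worked example with assumed values. One mole of an ideal gas
    # at 273 K in a 22.4 L container exerts about 1 atm: 1 * 0.0821 * 273 / 22.4 ~= 1.0,
    # which the function rounds to 1.
    assert moles_to_pressure(volume=22.4, moles=1, temperature=273) == 1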
| 60
|
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)


class EncoderDecoderConfig(PretrainedConfig):
    model_type = "encoder-decoder"
    is_composition = True

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        assert (
            "encoder" in kwargs and "decoder" in kwargs
        ), "Config has to be initialized with encoder and decoder config"
        encoder_config = kwargs.pop("encoder")
        encoder_model_type = encoder_config.pop("model_type")
        decoder_config = kwargs.pop("decoder")
        decoder_model_type = decoder_config.pop("model_type")

        from ..auto.configuration_auto import AutoConfig

        self.encoder = AutoConfig.for_model(encoder_model_type, **encoder_config)
        self.decoder = AutoConfig.for_model(decoder_model_type, **decoder_config)
        self.is_encoder_decoder = True

    @classmethod
    def from_encoder_decoder_configs(cls, encoder_config, decoder_config, **kwargs) -> PretrainedConfig:
        logger.info("Set `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config")
        decoder_config.is_decoder = True
        decoder_config.add_cross_attention = True
        return cls(encoder=encoder_config.to_dict(), decoder=decoder_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["encoder"] = self.encoder.to_dict()
        output["decoder"] = self.decoder.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
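
# --- Editor's note: a minimal usage sketch, assuming this class behaves like
# transformers.EncoderDecoderConfig; "bert-base-uncased" is only an example model id.
#
#   from transformers import AutoConfig
#   enc = AutoConfig.from_pretrained("bert-base-uncased")
#   dec = AutoConfig.from_pretrained("bert-base-uncased")
#   config = EncoderDecoderConfig.from_encoder_decoder_configs(enc, dec)
#   assert config.decoder.is_decoder and config.decoder.add_cross_attention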
| 60
| 1
|
'''simple docstring'''
import logging
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.bert.modeling_bert import (
BERT_INPUTS_DOCSTRING,
BERT_START_DOCSTRING,
BertEncoder,
BertModel,
BertPreTrainedModel,
)
logger = logging.getLogger(__name__)


class BertEncoderWithPabee(BertEncoder):
    def adaptive_forward(self, hidden_states, current_layer, attention_mask=None, head_mask=None):
        layer_outputs = self.layer[current_layer](hidden_states, attention_mask, head_mask[current_layer])
        hidden_states = layer_outputs[0]
        return hidden_states


@add_start_docstrings(
    "The bare Bert Model transformer with PABEE outputting raw hidden-states without any specific head on top.",
    BERT_START_DOCSTRING,
)
class BertModelWithPabee(BertModel):
    def __init__(self, config):
        super().__init__(config)

        self.encoder = BertEncoderWithPabee(config)

        self.init_weights()
        self.patience = 0
        self.inference_instances_num = 0
        self.inference_layers_num = 0
        self.regression_threshold = 0

    def set_regression_threshold(self, threshold):
        self.regression_threshold = threshold

    def set_patience(self, patience):
        self.patience = patience

    def reset_stats(self):
        self.inference_instances_num = 0
        self.inference_layers_num = 0

    def log_stats(self):
        avg_inf_layers = self.inference_layers_num / self.inference_instances_num
        message = (
            f"*** Patience = {self.patience} Avg. Inference Layers = {avg_inf_layers:.2f} Speed Up ="
            f" {1 - avg_inf_layers / self.config.num_hidden_layers:.2f} ***"
        )
        print(message)

    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        output_dropout=None,
        output_layers=None,
        regression=False,
    ):
        if input_ids is not None and inputs_embeds is not None:
            raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
        elif input_ids is not None:
            input_shape = input_ids.size()
        elif inputs_embeds is not None:
            input_shape = inputs_embeds.size()[:-1]
        else:
            raise ValueError("You have to specify either input_ids or inputs_embeds")

        device = input_ids.device if input_ids is not None else inputs_embeds.device

        if attention_mask is None:
            attention_mask = torch.ones(input_shape, device=device)
        if token_type_ids is None:
            token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)

        # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
        # ourselves in which case we just need to make it broadcastable to all heads.
        extended_attention_mask = self.get_extended_attention_mask(attention_mask, input_shape, device)

        # If a 2D or 3D attention mask is provided for the cross-attention
        # we need to make it broadcastable to [batch_size, num_heads, seq_length, seq_length]
        if self.config.is_decoder and encoder_hidden_states is not None:
            encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size()
            encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)
            if encoder_attention_mask is None:
                encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device)
            encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask)
        else:
            encoder_extended_attention_mask = None

        # Prepare head mask if needed
        # 1.0 in head_mask indicates we keep the head
        # attention_probs has shape bsz x n_heads x N x N
        # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
        # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
        head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)

        embedding_output = self.embeddings(
            input_ids=input_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds
        )
        encoder_outputs = embedding_output

        if self.training:
            res = []
            for i in range(self.config.num_hidden_layers):
                encoder_outputs = self.encoder.adaptive_forward(
                    encoder_outputs, current_layer=i, attention_mask=extended_attention_mask, head_mask=head_mask
                )

                pooled_output = self.pooler(encoder_outputs)
                logits = output_layers[i](output_dropout(pooled_output))
                res.append(logits)
        elif self.patience == 0:  # Use all layers for inference
            encoder_outputs = self.encoder(
                embedding_output,
                attention_mask=extended_attention_mask,
                head_mask=head_mask,
                encoder_hidden_states=encoder_hidden_states,
                encoder_attention_mask=encoder_extended_attention_mask,
            )
            pooled_output = self.pooler(encoder_outputs[0])
            res = [output_layers[self.config.num_hidden_layers - 1](pooled_output)]
        else:
            patient_counter = 0
            patient_result = None
            calculated_layer_num = 0
            for i in range(self.config.num_hidden_layers):
                calculated_layer_num += 1
                encoder_outputs = self.encoder.adaptive_forward(
                    encoder_outputs, current_layer=i, attention_mask=extended_attention_mask, head_mask=head_mask
                )

                pooled_output = self.pooler(encoder_outputs)
                logits = output_layers[i](pooled_output)
                if regression:
                    labels = logits.detach()
                    if patient_result is not None:
                        patient_labels = patient_result.detach()
                    if (patient_result is not None) and torch.abs(patient_labels - labels) < self.regression_threshold:
                        patient_counter += 1
                    else:
                        patient_counter = 0
                else:
                    labels = logits.detach().argmax(dim=1)
                    if patient_result is not None:
                        patient_labels = patient_result.detach().argmax(dim=1)
                    if (patient_result is not None) and torch.all(labels.eq(patient_labels)):
                        patient_counter += 1
                    else:
                        patient_counter = 0

                patient_result = logits
                if patient_counter == self.patience:
                    break
            res = [patient_result]
            self.inference_layers_num += calculated_layer_num
            self.inference_instances_num += 1

        return res


@add_start_docstrings(
    """Bert Model transformer with PABEE and a sequence classification/regression head on top (a linear layer on top
    of the pooled output), e.g. for GLUE tasks.""",
    BERT_START_DOCSTRING,
)
class BertForSequenceClassificationWithPabee(BertPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.bert = BertModelWithPabee(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifiers = nn.ModuleList(
            [nn.Linear(config.hidden_size, self.config.num_labels) for _ in range(config.num_hidden_layers)]
        )

        self.init_weights()

    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
    ):
        logits = self.bert(
            input_ids=input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_dropout=self.dropout,
            output_layers=self.classifiers,
            regression=self.num_labels == 1,
        )

        outputs = (logits[-1],)

        if labels is not None:
            total_loss = None
            total_weights = 0
            for ix, logits_item in enumerate(logits):
                if self.num_labels == 1:
                    # We are doing regression
                    loss_fct = MSELoss()
                    loss = loss_fct(logits_item.view(-1), labels.view(-1))
                else:
                    loss_fct = CrossEntropyLoss()
                    loss = loss_fct(logits_item.view(-1, self.num_labels), labels.view(-1))
                if total_loss is None:
                    total_loss = loss
                else:
                    total_loss += loss * (ix + 1)
                total_weights += ix + 1
            outputs = (total_loss / total_weights,) + outputs

        return outputs
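
# --- Editor's note: a minimal inference sketch for the patience-based early exit
# above. The checkpoint id is illustrative; the research project loads a fine-tuned
# model directory instead.
#
#   from transformers import BertConfig
#   config = BertConfig.from_pretrained("bert-base-uncased", num_labels=2)
#   model = BertForSequenceClassificationWithPabee.from_pretrained("bert-base-uncased", config=config)
#   model.bert.set_patience(3)  # exit once 3 consecutive layer classifiers agree
#   model.bert.reset_stats()
#   model.eval()                # patience-based exit only applies outside training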
| 717
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_git": ["GIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "GitConfig", "GitVisionConfig"],
    "processing_git": ["GitProcessor"],
}
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_git"] = [
        "GIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "GitForCausalLM",
        "GitModel",
        "GitPreTrainedModel",
        "GitVisionModel",
    ]
if TYPE_CHECKING:
    from .configuration_git import GIT_PRETRAINED_CONFIG_ARCHIVE_MAP, GitConfig, GitVisionConfig
    from .processing_git import GitProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_git import (
            GIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            GitForCausalLM,
            GitModel,
            GitPreTrainedModel,
            GitVisionModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 425
| 0
|
'''simple docstring'''
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "TsinghuaAI/CPM-Generate": "https://huggingface.co/TsinghuaAI/CPM-Generate/resolve/main/spiece.model",
    }
}


class CpmTokenizer(PreTrainedTokenizer):
    """Runs pre-tokenization with the Jieba segmentation tool. It is used in CPM models."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP

    def __init__(
        self,
        vocab_file,
        do_lower_case=False,
        remove_space=True,
        keep_accents=False,
        bos_token="<s>",
        eos_token="</s>",
        unk_token="<unk>",
        sep_token="<sep>",
        pad_token="<pad>",
        cls_token="<cls>",
        mask_token="<mask>",
        additional_special_tokens=["<eop>", "<eod>"],
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ):
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            additional_special_tokens=additional_special_tokens,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self._pad_token_type_id = 3
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

        try:
            import jieba
        except ModuleNotFoundError as error:
            raise error.__class__(
                "You need to install jieba to use CpmTokenizer or CpmTokenizerFast. "
                "See https://pypi.org/project/jieba/ for installation."
            )
        self.jieba = jieba
        self.translator = str.maketrans(" \n", "\u2582\u2583")

    @property
    # Copied from transformers.models.xlnet.tokenization_xlnet.XLNetTokenizer.vocab_size
    def vocab_size(self):
        return len(self.sp_model)

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def preprocess_text(self, inputs):
        if self.remove_space:
            outputs = " ".join(inputs.strip().split())
        else:
            outputs = inputs
        outputs = outputs.replace("``", '"').replace("''", '"')

        if not self.keep_accents:
            outputs = unicodedata.normalize("NFKD", outputs)
            outputs = "".join([c for c in outputs if not unicodedata.combining(c)])
        if self.do_lower_case:
            outputs = outputs.lower()

        return outputs

    def _tokenize(self, text: str) -> List[str]:
        """Tokenize a string."""
        text = self.preprocess_text(text)
        pieces = self.sp_model.encode(text, out_type=str)
        new_pieces = []
        for piece in pieces:
            if len(piece) > 1 and piece[-1] == str(",") and piece[-2].isdigit():
                cur_pieces = self.sp_model.EncodeAsPieces(piece[:-1].replace(SPIECE_UNDERLINE, ""))
                if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
                    if len(cur_pieces[0]) == 1:
                        cur_pieces = cur_pieces[1:]
                    else:
                        cur_pieces[0] = cur_pieces[0][1:]
                cur_pieces.append(piece[-1])
                new_pieces.extend(cur_pieces)
            else:
                new_pieces.append(piece)

        return new_pieces

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        return self.sp_model.PieceToId(token)

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        return self.sp_model.IdToPiece(index)

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (strings) to a single string."""
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return token_ids_0 + sep + cls
        return token_ids_0 + sep + token_ids_1 + sep + cls

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is not None:
            return ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1, 1]
        return ([0] * len(token_ids_0)) + [1, 1]

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        sep = [self.sep_token_id]
        cls_segment_id = [2]

        if token_ids_1 is None:
            return len(token_ids_0 + sep) * [0] + cls_segment_id
        return len(token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1] + cls_segment_id

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None):
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)

    def _decode(self, *args, **kwargs):
        text = super()._decode(*args, **kwargs)
        text = text.replace(" ", "").replace("\u2582", " ").replace("\u2583", "\n")
        return text
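
# --- Editor's note: a short usage sketch. "TsinghuaAI/CPM-Generate" is the checkpoint
# named in PRETRAINED_VOCAB_FILES_MAP above; sentencepiece and jieba must be installed
# (the constructor imports jieba).
#
#   tokenizer = CpmTokenizer.from_pretrained("TsinghuaAI/CPM-Generate")
#   ids = tokenizer.encode("你好,世界")
#   print(tokenizer.decode(ids))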
| 460
|
'''simple docstring'''
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxSeqaSeqConfigWithPast
from ...utils import logging
if TYPE_CHECKING:
from ...feature_extraction_utils import FeatureExtractionMixin
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType
logger = logging.get_logger(__name__)

WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "openai/whisper-base": "https://huggingface.co/openai/whisper-base/resolve/main/config.json",
}
# fmt: off
NON_SPEECH_TOKENS = [
1, 2, 7, 8, 9, 10, 14, 25,
26, 27, 28, 29, 31, 58, 59, 60, 61, 62,
63, 90, 91, 92, 93, 357, 366, 438, 532, 685,
705, 796, 930, 1_058, 1_220, 1_267, 1_279, 1_303, 1_343, 1_377,
1_391, 1_635, 1_782, 1_875, 2_162, 2_361, 2_488, 3_467, 4_008, 4_211,
4_600, 4_808, 5_299, 5_855, 6_329, 7_203, 9_609, 9_959, 10_563, 10_786,
11_420, 11_709, 11_907, 13_163, 13_697, 13_700, 14_808, 15_306, 16_410, 16_791,
17_992, 19_203, 19_510, 20_724, 22_305, 22_935, 27_007, 30_109, 30_420, 33_409,
34_949, 40_283, 40_493, 40_549, 47_282, 49_146, 50_257, 50_359, 50_360, 50_361
]
NON_SPEECH_TOKENS_MULTI = [
1, 2, 7, 8, 9, 10, 14, 25,
26, 27, 28, 29, 31, 58, 59, 60, 61, 62,
63, 90, 91, 92, 93, 359, 503, 522, 542, 873,
893, 902, 918, 922, 931, 1_350, 1_853, 1_982, 2_460, 2_627,
3_246, 3_253, 3_268, 3_536, 3_846, 3_961, 4_183, 4_667, 6_585, 6_647,
7_273, 9_061, 9_383, 10_428, 10_929, 11_938, 12_033, 12_331, 12_562, 13_793,
14_157, 14_635, 15_265, 15_618, 16_553, 16_604, 18_362, 18_956, 20_075, 21_675,
22_520, 26_130, 26_161, 26_435, 28_279, 29_464, 31_650, 32_302, 32_470, 36_865,
42_863, 47_425, 49_870, 50_254, 50_258, 50_360, 50_361, 50_362
]


class WhisperConfig(PretrainedConfig):
    model_type = "whisper"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=51865, num_mel_bins=80,
        encoder_layers=6, encoder_attention_heads=4,
        decoder_layers=6, decoder_attention_heads=4,
        decoder_ffn_dim=1536, encoder_ffn_dim=1536,
        encoder_layerdrop=0.0, decoder_layerdrop=0.0,
        decoder_start_token_id=50257, use_cache=True, is_encoder_decoder=True,
        activation_function="gelu", d_model=256,
        dropout=0.0, attention_dropout=0.0, activation_dropout=0.0,
        init_std=0.02, scale_embedding=False,
        max_source_positions=1500, max_target_positions=448,
        pad_token_id=50256, bos_token_id=50256, eos_token_id=50256,
        suppress_tokens=None, begin_suppress_tokens=[220, 50256],
        use_weighted_layer_sum=False, classifier_proj_size=256,
        apply_spec_augment=False,
        mask_time_prob=0.05, mask_time_length=10, mask_time_min_masks=2,
        mask_feature_prob=0.0, mask_feature_length=10, mask_feature_min_masks=0,
        median_filter_width=7,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.num_mel_bins = num_mel_bins
        self.d_model = d_model
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_ffn_dim = encoder_ffn_dim
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.max_source_positions = max_source_positions
        self.max_target_positions = max_target_positions

        # Audio Classification-specific parameters. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size
        self.use_weighted_layer_sum = use_weighted_layer_sum

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        self.median_filter_width = median_filter_width

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            suppress_tokens=suppress_tokens,
            begin_suppress_tokens=begin_suppress_tokens,
            **kwargs,
        )


class WhisperOnnxConfig(OnnxSeqaSeqConfigWithPast):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict(
            [
                ("input_features", {0: "batch", 1: "feature_size", 2: "encoder_sequence"}),
            ]
        )
        if self.use_past:
            common_inputs["decoder_input_ids"] = {0: "batch"}
        else:
            common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}

        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")

        return common_inputs

    def generate_dummy_inputs(
        self,
        preprocessor: Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"],
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional["TensorType"] = None,
        sampling_rate: int = 22050,
        time_duration: float = 5.0,
        frequency: int = 220,
    ) -> Mapping[str, Any]:
        dummy_inputs = OrderedDict()
        encoder_inputs = OnnxConfig.generate_dummy_inputs(
            self,
            preprocessor=preprocessor.feature_extractor,
            batch_size=batch_size,
            framework=framework,
            sampling_rate=sampling_rate,
            time_duration=time_duration,
            frequency=frequency,
        )
        encoder_sequence_length = encoder_inputs["input_features"].shape[2]
        seq_length = encoder_sequence_length // 2 if self.use_past else seq_length

        decoder_inputs = super().generate_dummy_inputs(
            preprocessor.tokenizer, batch_size, seq_length, is_pair, framework
        )

        dummy_inputs["input_features"] = encoder_inputs.pop("input_features")
        dummy_inputs["decoder_input_ids"] = decoder_inputs.pop("decoder_input_ids")

        if "past_key_values" in decoder_inputs:
            dummy_inputs["past_key_values"] = decoder_inputs.pop("past_key_values")

        return dummy_inputs

    @property
    def atol_for_validation(self) -> float:
        return 1e-3
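
# --- Editor's note: a minimal configuration sketch; the sizes below are illustrative
# only, not a released Whisper checkpoint.
#
#   config = WhisperConfig(d_model=384, encoder_layers=4, decoder_layers=4)
#   assert config.hidden_size == 384  # resolved through attribute_map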
| 460
| 1
|
from math import factorial
def binomial_distribution(successes: int, trials: int, prob: float) -> float:
    """Probability of exactly `successes` successes in `trials` Bernoulli trials."""
    if successes > trials:
        raise ValueError("successes must be lower or equal to trials")
    if trials < 0 or successes < 0:
        raise ValueError("the function is defined for non-negative integers")
    if not isinstance(successes, int) or not isinstance(trials, int):
        raise ValueError("the function is defined for non-negative integers")
    if not 0 < prob < 1:
        raise ValueError("prob has to be in range of 1 - 0")
    probability = (prob**successes) * ((1 - prob) ** (trials - successes))
    # Calculate the binomial coefficient: n! / (k! * (n - k)!)
    coefficient = float(factorial(trials))
    coefficient /= factorial(successes) * factorial(trials - successes)
    return probability * coefficient
if __name__ == "__main__":
from doctest import testmod
testmod()
print("""Probability of 2 successes out of 4 trails""")
print("""with probability of 0.75 is:""", end=""" """)
print(binomial_distribution(2, 4, 0.75))
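    # Editor's note: hand check of the printed value:
    # C(4, 2) * 0.75**2 * 0.25**2 = 6 * 0.5625 * 0.0625 = 0.2109375
    assert abs(binomial_distribution(2, 4, 0.75) - 0.2109375) < 1e-12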
| 720
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {"configuration_deit": ["DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "DeiTConfig", "DeiTOnnxConfig"]}
try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_deit"] = ["DeiTFeatureExtractor"]
    _import_structure["image_processing_deit"] = ["DeiTImageProcessor"]
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_deit"] = [
        "DEIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "DeiTForImageClassification",
        "DeiTForImageClassificationWithTeacher",
        "DeiTForMaskedImageModeling",
        "DeiTModel",
        "DeiTPreTrainedModel",
    ]
try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_deit"] = [
        "TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFDeiTForImageClassification",
        "TFDeiTForImageClassificationWithTeacher",
        "TFDeiTForMaskedImageModeling",
        "TFDeiTModel",
        "TFDeiTPreTrainedModel",
    ]
if TYPE_CHECKING:
    from .configuration_deit import DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, DeiTConfig, DeiTOnnxConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_deit import DeiTFeatureExtractor
        from .image_processing_deit import DeiTImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_deit import (
            DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            DeiTForImageClassification,
            DeiTForImageClassificationWithTeacher,
            DeiTForMaskedImageModeling,
            DeiTModel,
            DeiTPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_deit import (
            TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFDeiTForImageClassification,
            TFDeiTForImageClassificationWithTeacher,
            TFDeiTForMaskedImageModeling,
            TFDeiTModel,
            TFDeiTPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 520
| 0
|