| code (string, length 86 to 54.5k) | code_codestyle (int64, 0 to 371) | style_context (string, length 87 to 49.2k) | style_context_codestyle (int64, 0 to 349) | label (int64, 0 to 1) |
|---|---|---|---|---|
"""simple docstring"""
from math import factorial
def solution(num: int = 100) -> int:
    """Return the sum of the digits of num! (Project Euler problem 20)."""
    return sum(map(int, str(factorial(num))))
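# Hedged sanity check (my addition, not part of the original row):
# 10! = 3628800 and 3 + 6 + 2 + 8 + 8 + 0 + 0 = 27.
assert solution(10) == 27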
if __name__ == "__main__":
print(solution(int(input("Enter the Number: ").strip())))
| 77
|
"""simple docstring"""
import qiskit
def half_adder(bit0: int, bit1: int) -> dict:
    """Build and simulate a quantum half adder; returns measurement counts."""
    simulator = qiskit.Aer.get_backend("aer_simulator")
    qc_ha = qiskit.QuantumCircuit(4, 2)
    # encode inputs in qubits 0 and 1
    if bit0 == 1:
        qc_ha.x(0)
    if bit1 == 1:
        qc_ha.x(1)
    qc_ha.barrier()
    # use cnots to write XOR of the inputs on qubit 2
    qc_ha.cx(0, 2)
    qc_ha.cx(1, 2)
    # use ccx / toffoli gate to write AND of the inputs on qubit 3
    qc_ha.ccx(0, 1, 3)
    qc_ha.barrier()
    # extract outputs
    qc_ha.measure(2, 0)  # extract XOR value (sum bit)
    qc_ha.measure(3, 1)  # extract AND value (carry bit)
    # Execute the circuit on the qasm simulator
    job = qiskit.execute(qc_ha, simulator, shots=1000)
    # Return the histogram data of the results of the experiment
    return job.result().get_counts(qc_ha)
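# Assumed output shape (my note, not from the original row): for inputs (1, 1)
# the sum (XOR) bit is 0 and the carry (AND) bit is 1, so with qiskit's
# classical-bit ordering the counts should be concentrated on the bitstring
# '10', e.g. {'10': 1000}.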
if __name__ == "__main__":
lowerCamelCase_ : List[str] = half_adder(1, 1)
print(F"Half Adder Output Qubit Counts: {counts}")
| 286
| 0
|
"""simple docstring"""
import argparse
import torch
from transformers import OpenAIGPTConfig, OpenAIGPTModel, load_tf_weights_in_openai_gpt
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def convert_openai_checkpoint_to_pytorch(openai_checkpoint_folder_path, openai_config_file, pytorch_dump_folder_path):
    """Convert a TensorFlow OpenAI GPT checkpoint into a PyTorch model."""
    # Construct model
    if openai_config_file == "":
        config = OpenAIGPTConfig()
    else:
        config = OpenAIGPTConfig.from_json_file(openai_config_file)
    model = OpenAIGPTModel(config)

    # Load weights from numpy
    load_tf_weights_in_openai_gpt(model, config, openai_checkpoint_folder_path)

    # Save pytorch-model
    pytorch_weights_dump_path = pytorch_dump_folder_path + "/" + WEIGHTS_NAME
    pytorch_config_dump_path = pytorch_dump_folder_path + "/" + CONFIG_NAME
    print(f"Save PyTorch model to {pytorch_weights_dump_path}")
    torch.save(model.state_dict(), pytorch_weights_dump_path)
    print(f"Save configuration file to {pytorch_config_dump_path}")
    with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
        f.write(config.to_json_string())
if __name__ == "__main__":
A_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--openai_checkpoint_folder_path''',
default=None,
type=str,
required=True,
help='''Path to the TensorFlow checkpoint path.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--openai_config_file''',
default='''''',
type=str,
help=(
'''An optional config json file corresponding to the pre-trained OpenAI model. \n'''
'''This specifies the model architecture.'''
),
)
A_ = parser.parse_args()
convert_openai_checkpoint_to_pytorch(
args.openai_checkpoint_folder_path, args.openai_config_file, args.pytorch_dump_folder_path
)
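# Hypothetical invocation (script and path names are placeholders, my addition):
# python convert_openai_original_tf_checkpoint_to_pytorch.py \
#     --openai_checkpoint_folder_path ./openai_ckpt \
#     --pytorch_dump_folder_path ./converted_model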
| 352
|
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
logger = logging.get_logger(__name__)

SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/swin-tiny-patch4-window7-224": (
        "https://huggingface.co/microsoft/swin-tiny-patch4-window7-224/resolve/main/config.json"
    ),
    # See all Swin models at https://huggingface.co/models?filter=swin
}
class SwinConfig(BackboneConfigMixin, PretrainedConfig):
    """Configuration class for the Swin Transformer model."""

    model_type = "swin"

    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        image_size=224,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        depths=[2, 2, 6, 2],
        num_heads=[3, 6, 12, 24],
        window_size=7,
        mlp_ratio=4.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        encoder_stride=32,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.encoder_stride = encoder_stride
        # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
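# Hedged illustration (my addition): with the defaults above, the derived
# channel dimension is embed_dim * 2 ** (len(depths) - 1) = 96 * 2**3 = 768,
# so SwinConfig().hidden_size == 768.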
class SwinOnnxConfig(OnnxConfig):
    """ONNX export configuration for Swin."""

    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
| 132
| 0
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_nllb"] = ["NllbTokenizer"]

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_nllb_fast"] = ["NllbTokenizerFast"]

if TYPE_CHECKING:
    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_nllb import NllbTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_nllb_fast import NllbTokenizerFast

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
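# Assumed behavior (my note): replacing the module in sys.modules with a
# _LazyModule defers the actual `tokenization_nllb*` imports until one of the
# exported names is first accessed.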
| 96
|
import re
import time
from typing import Optional
import IPython.display as disp
from ..trainer_callback import TrainerCallback
from ..trainer_utils import IntervalStrategy, has_length
def format_time(t: float) -> str:
    "Format `t` (in seconds) to (h):mm:ss"
    t = int(t)
    h, m, s = t // 3600, (t // 60) % 60, t % 60
    return f"{h}:{m:02d}:{s:02d}" if h != 0 else f"{m:02d}:{s:02d}"
def html_progress_bar(value: int, total: int, prefix: str, label: str, width: int = 300) -> str:
    "Produce the HTML code for a progress bar."
    # docstyle-ignore
    return f"""
    <div>
      {prefix}
      <progress value='{value}' max='{total}' style='width:{width}px; height:20px; vertical-align: middle;'></progress>
      {label}
    </div>
    """
def text_to_html_table(items) -> str:
    "Put the texts in `items` in an HTML table."
    html_code = """<table border="1" class="dataframe">\n"""
    html_code += """  <thead>\n <tr style="text-align: left;">\n"""
    for i in items[0]:
        html_code += f"      <th>{i}</th>\n"
    html_code += "    </tr>\n  </thead>\n  <tbody>\n"
    for line in items[1:]:
        html_code += "    <tr>\n"
        for elt in line:
            elt = f"{elt:.6f}" if isinstance(elt, float) else str(elt)
            html_code += f"      <td>{elt}</td>\n"
        html_code += "    </tr>\n"
    html_code += "  </tbody>\n</table><p>"
    return html_code
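# Hedged example (my addition): text_to_html_table([["Step", "Loss"], [10, 0.5]])
# renders a small HTML table with "Step"/"Loss" headers and one data row.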
class NotebookProgressBar:
    """A progress bar displayed as HTML in a Jupyter notebook."""

    warmup = 5
    update_every = 0.2

    def __init__(self, total: int, prefix: Optional[str] = None, leave: bool = True, parent=None, width: int = 300):
        self.total = total
        self.prefix = "" if prefix is None else prefix
        self.leave = leave
        self.parent = parent
        self.width = width
        self.last_value = None
        self.comment = None
        self.output = None

    def update(self, value: int, force_update: bool = False, comment: str = None):
        self.value = value
        if comment is not None:
            self.comment = comment
        if self.last_value is None:
            self.start_time = self.last_time = time.time()
            self.start_value = self.last_value = value
            self.elapsed_time = self.predicted_remaining = None
            self.first_calls = self.warmup
            self.wait_for = 1
            self.update_bar(value)
        elif value <= self.last_value and not force_update:
            return
        elif force_update or self.first_calls > 0 or value >= min(self.last_value + self.wait_for, self.total):
            if self.first_calls > 0:
                self.first_calls -= 1
            current_time = time.time()
            self.elapsed_time = current_time - self.start_time
            # We could have value = self.start_value if the update is called twice with the same start value.
            if value > self.start_value:
                self.average_time_per_item = self.elapsed_time / (value - self.start_value)
            else:
                self.average_time_per_item = None
            if value >= self.total:
                value = self.total
                self.predicted_remaining = None
                if not self.leave:
                    self.close()
            elif self.average_time_per_item is not None:
                self.predicted_remaining = self.average_time_per_item * (self.total - value)
            self.update_bar(value)
            self.last_value = value
            self.last_time = current_time
            if self.average_time_per_item is None:
                self.wait_for = 1
            else:
                self.wait_for = max(int(self.update_every / self.average_time_per_item), 1)

    def update_bar(self, value: int, comment: str = None):
        spaced_value = " " * (len(str(self.total)) - len(str(value))) + str(value)
        if self.elapsed_time is None:
            self.label = f"[{spaced_value}/{self.total} : < :"
        elif self.predicted_remaining is None:
            self.label = f"[{spaced_value}/{self.total} {format_time(self.elapsed_time)}"
        else:
            self.label = (
                f"[{spaced_value}/{self.total} {format_time(self.elapsed_time)} <"
                f" {format_time(self.predicted_remaining)}"
            )
            self.label += f", {1/self.average_time_per_item:.2f} it/s"
        self.label += "]" if self.comment is None or len(self.comment) == 0 else f", {self.comment}]"
        self.display()

    def display(self):
        self.html_code = html_progress_bar(self.value, self.total, self.prefix, self.label, self.width)
        if self.parent is not None:
            # If this is a child bar, the parent will take care of the display.
            self.parent.display()
            return
        if self.output is None:
            self.output = disp.display(disp.HTML(self.html_code), display_id=True)
        else:
            self.output.update(disp.HTML(self.html_code))

    def close(self):
        "Closes the progress bar."
        if self.parent is None and self.output is not None:
            self.output.update(disp.HTML(""))
class NotebookTrainingTracker(NotebookProgressBar):
    """Tracks training progress and writes a table of metrics below the bar."""

    def __init__(self, num_steps: int, column_names=None):
        super().__init__(num_steps)
        self.inner_table = None if column_names is None else [column_names]
        self.child_bar = None

    def display(self):
        self.html_code = html_progress_bar(self.value, self.total, self.prefix, self.label, self.width)
        if self.inner_table is not None:
            self.html_code += text_to_html_table(self.inner_table)
        if self.child_bar is not None:
            self.html_code += self.child_bar.html_code
        if self.output is None:
            self.output = disp.display(disp.HTML(self.html_code), display_id=True)
        else:
            self.output.update(disp.HTML(self.html_code))

    def write_line(self, values: dict):
        if self.inner_table is None:
            self.inner_table = [list(values.keys()), list(values.values())]
        else:
            columns = self.inner_table[0]
            if len(self.inner_table) == 1:
                # We give a chance to update the column names at the first iteration
                for key in values.keys():
                    if key not in columns:
                        columns.append(key)
                self.inner_table[0] = columns
            self.inner_table.append([values[c] for c in columns])

    def add_child(self, total: int, prefix=None, width: int = 300):
        self.child_bar = NotebookProgressBar(total, prefix=prefix, parent=self, width=width)
        return self.child_bar

    def remove_child(self):
        self.child_bar = None
        self.display()
class NotebookProgressCallback(TrainerCallback):
    """A TrainerCallback that displays the progress of training or evaluation in a notebook."""

    def __init__(self):
        self.training_tracker = None
        self.prediction_bar = None
        self._force_next_update = False

    def on_train_begin(self, args, state, control, **kwargs):
        self.first_column = "Epoch" if args.evaluation_strategy == IntervalStrategy.EPOCH else "Step"
        self.training_loss = 0
        self.last_log = 0
        column_names = [self.first_column] + ["Training Loss"]
        if args.evaluation_strategy != IntervalStrategy.NO:
            column_names.append("Validation Loss")
        self.training_tracker = NotebookTrainingTracker(state.max_steps, column_names)

    def on_step_end(self, args, state, control, **kwargs):
        epoch = int(state.epoch) if int(state.epoch) == state.epoch else f"{state.epoch:.2f}"
        self.training_tracker.update(
            state.global_step + 1,
            comment=f"Epoch {epoch}/{state.num_train_epochs}",
            force_update=self._force_next_update,
        )
        self._force_next_update = False

    def on_prediction_step(self, args, state, control, eval_dataloader=None, **kwargs):
        if not has_length(eval_dataloader):
            return
        if self.prediction_bar is None:
            if self.training_tracker is not None:
                self.prediction_bar = self.training_tracker.add_child(len(eval_dataloader))
            else:
                self.prediction_bar = NotebookProgressBar(len(eval_dataloader))
            self.prediction_bar.update(1)
        else:
            self.prediction_bar.update(self.prediction_bar.value + 1)

    def on_predict(self, args, state, control, **kwargs):
        if self.prediction_bar is not None:
            self.prediction_bar.close()
        self.prediction_bar = None

    def on_log(self, args, state, control, logs=None, **kwargs):
        # Only for when there is no evaluation
        if args.evaluation_strategy == IntervalStrategy.NO and "loss" in logs:
            values = {"Training Loss": logs["loss"]}
            # First column is necessarily Step since we're not in epoch eval strategy
            values["Step"] = state.global_step
            self.training_tracker.write_line(values)

    def on_evaluate(self, args, state, control, metrics=None, **kwargs):
        if self.training_tracker is not None:
            values = {"Training Loss": "No log", "Validation Loss": "No log"}
            for log in reversed(state.log_history):
                if "loss" in log:
                    values["Training Loss"] = log["loss"]
                    break

            if self.first_column == "Epoch":
                values["Epoch"] = int(state.epoch)
            else:
                values["Step"] = state.global_step
            metric_key_prefix = "eval"
            for k in metrics:
                if k.endswith("_loss"):
                    metric_key_prefix = re.sub(r"\_loss$", "", k)
            _ = metrics.pop("total_flos", None)
            _ = metrics.pop("epoch", None)
            _ = metrics.pop(f"{metric_key_prefix}_runtime", None)
            _ = metrics.pop(f"{metric_key_prefix}_samples_per_second", None)
            _ = metrics.pop(f"{metric_key_prefix}_steps_per_second", None)
            _ = metrics.pop(f"{metric_key_prefix}_jit_compilation_time", None)
            for k, v in metrics.items():
                if k == f"{metric_key_prefix}_loss":
                    values["Validation Loss"] = v
                else:
                    splits = k.split("_")
                    name = " ".join([part.capitalize() for part in splits[1:]])
                    values[name] = v
            self.training_tracker.write_line(values)
            self.training_tracker.remove_child()
            self.prediction_bar = None
            # Evaluation takes a long time so we should force the next update.
            self._force_next_update = True

    def on_train_end(self, args, state, control, **kwargs):
        self.training_tracker.update(
            state.global_step,
            comment=f"Epoch {int(state.epoch)}/{state.num_train_epochs}",
            force_update=True,
        )
        self.training_tracker = None
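# Assumed usage (my note): this callback is what transformers' Trainer registers
# automatically in notebook environments, rendering a live HTML progress bar and
# metrics table in place of the default tqdm output.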
| 192
| 0
|
"""simple docstring"""
import inspect
import os
import unittest
from dataclasses import dataclass
import torch
from accelerate import Accelerator, DistributedDataParallelKwargs, GradScalerKwargs
from accelerate.state import AcceleratorState
from accelerate.test_utils import execute_subprocess_async, require_cuda, require_multi_gpu
from accelerate.utils import KwargsHandler
@dataclass
class MockClass(KwargsHandler):
    a: int = 0
    b: bool = False
    c: float = 3.0


class KwargsHandlerTester(unittest.TestCase):
    def test_kwargs_handler(self):
        # If no defaults are changed, `to_kwargs` returns an empty dict.
        self.assertDictEqual(MockClass().to_kwargs(), {})
        self.assertDictEqual(MockClass(a=2).to_kwargs(), {"a": 2})
        self.assertDictEqual(MockClass(a=2, b=True).to_kwargs(), {"a": 2, "b": True})
        self.assertDictEqual(MockClass(a=2, c=2.25).to_kwargs(), {"a": 2, "c": 2.25})

    @require_cuda
    def test_grad_scaler_kwargs(self):
        # If no defaults are changed, `to_kwargs` returns an empty dict.
        scaler_handler = GradScalerKwargs(init_scale=1024, growth_factor=2)
        AcceleratorState._reset_state()
        accelerator = Accelerator(mixed_precision="fp16", kwargs_handlers=[scaler_handler])
        print(accelerator.use_fp16)
        scaler = accelerator.scaler

        # Check the kwargs have been applied
        self.assertEqual(scaler._init_scale, 1024.0)
        self.assertEqual(scaler._growth_factor, 2.0)

        # Check the other values are at the default
        self.assertEqual(scaler._backoff_factor, 0.5)
        self.assertEqual(scaler._growth_interval, 2000)
        self.assertEqual(scaler._enabled, True)

    @require_multi_gpu
    def test_ddp_kwargs(self):
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", inspect.getfile(self.__class__)]
        execute_subprocess_async(cmd, env=os.environ.copy())
if __name__ == "__main__":
_lowerCAmelCase : str = DistributedDataParallelKwargs(bucket_cap_mb=15, find_unused_parameters=True)
_lowerCAmelCase : Any = Accelerator(kwargs_handlers=[ddp_scaler])
_lowerCAmelCase : List[str] = torch.nn.Linear(100, 200)
_lowerCAmelCase : Tuple = accelerator.prepare(model)
# Check the values changed in kwargs
_lowerCAmelCase : int = """"""
_lowerCAmelCase : List[str] = model.bucket_bytes_cap // (1_024 * 1_024)
if observed_bucket_cap_map != 15:
error_msg += F"Kwargs badly passed, should have `15` but found {observed_bucket_cap_map}.\n"
if model.find_unused_parameters is not True:
error_msg += F"Kwargs badly passed, should have `True` but found {model.find_unused_parameters}.\n"
# Check the values of the defaults
if model.dim != 0:
error_msg += F"Default value not respected, should have `0` but found {model.dim}.\n"
if model.broadcast_buffers is not True:
error_msg += F"Default value not respected, should have `True` but found {model.broadcast_buffers}.\n"
if model.gradient_as_bucket_view is not False:
error_msg += F"Default value not respected, should have `False` but found {model.gradient_as_bucket_view}.\n"
# Raise error at the end to make sure we don't stop at the first failure.
if len(error_msg) > 0:
raise ValueError(error_msg)
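# Assumed semantics (my note): `KwargsHandler.to_kwargs()` diffs an instance
# against its dataclass defaults, which is why an unmodified MockClass()
# serializes to an empty dict in the first test above.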
| 298
|
"""simple docstring"""
import unittest
import numpy as np
import requests
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
    import torch

    from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11
else:
    is_torch_greater_or_equal_than_1_11 = False
if is_vision_available():
    from PIL import Image

    from transformers import Pix2StructImageProcessor
class Pix2StructImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        size=None,
        do_normalize=True,
        do_convert_rgb=True,
        patch_size=None,
    ):
        size = size if size is not None else {"height": 20, "width": 20}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.size = size
        self.do_normalize = do_normalize
        self.do_convert_rgb = do_convert_rgb
        self.max_patches = [512, 1024, 2048, 4096]
        self.patch_size = patch_size if patch_size is not None else {"height": 16, "width": 16}

    def prepare_image_processor_dict(self):
        return {"do_normalize": self.do_normalize, "do_convert_rgb": self.do_convert_rgb}

    def prepare_dummy_image(self):
        img_url = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/australia.jpg"
        raw_image = Image.open(requests.get(img_url, stream=True).raw).convert("RGB")
        return raw_image
@unittest.skipIf(
    not is_torch_greater_or_equal_than_1_11,
    reason="`Pix2StructImageProcessor` requires `torch>=1.11.0`.",
)
@require_torch
@require_vision
class Pix2StructImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = Pix2StructImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = Pix2StructImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processor, "do_normalize"))
        self.assertTrue(hasattr(image_processor, "do_convert_rgb"))

    def test_expected_patches(self):
        dummy_image = self.image_processor_tester.prepare_dummy_image()
        image_processor = self.image_processing_class(**self.image_processor_dict)
        max_patch = 2048
        inputs = image_processor(dummy_image, return_tensors="pt", max_patches=max_patch)
        self.assertTrue(torch.allclose(inputs.flattened_patches.mean(), torch.tensor(0.0606), atol=1e-3, rtol=1e-3))

    def test_call_pil(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        expected_hidden_dim = (
            (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
            * self.image_processor_tester.num_channels
        ) + 2

        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            encoded_images = image_processor(
                image_inputs[0], return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(encoded_images.shape, (1, max_patch, expected_hidden_dim))

            # Test batched
            encoded_images = image_processor(
                image_inputs, return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape, (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim)
            )
    def test_call_vqa(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        expected_hidden_dim = (
            (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
            * self.image_processor_tester.num_channels
        ) + 2

        image_processor.is_vqa = True
        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            with self.assertRaises(ValueError):
                encoded_images = image_processor(
                    image_inputs[0], return_tensors="pt", max_patches=max_patch
                ).flattened_patches

            dummy_text = "Hello"
            encoded_images = image_processor(
                image_inputs[0], return_tensors="pt", max_patches=max_patch, header_text=dummy_text
            ).flattened_patches
            self.assertEqual(encoded_images.shape, (1, max_patch, expected_hidden_dim))

            # Test batched
            encoded_images = image_processor(
                image_inputs, return_tensors="pt", max_patches=max_patch, header_text=dummy_text
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape, (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim)
            )
    def test_call_numpy(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        expected_hidden_dim = (
            (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
            * self.image_processor_tester.num_channels
        ) + 2

        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            encoded_images = image_processor(
                image_inputs[0], return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(encoded_images.shape, (1, max_patch, expected_hidden_dim))

            # Test batched
            encoded_images = image_processor(
                image_inputs, return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape, (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim)
            )

    def test_call_pytorch(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        expected_hidden_dim = (
            (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
            * self.image_processor_tester.num_channels
        ) + 2

        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            encoded_images = image_processor(
                image_inputs[0], return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(encoded_images.shape, (1, max_patch, expected_hidden_dim))

            # Test batched
            encoded_images = image_processor(
                image_inputs, return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape, (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim)
            )
@unittest.skipIf(
    not is_torch_greater_or_equal_than_1_11,
    reason="`Pix2StructImageProcessor` requires `torch>=1.11.0`.",
)
@require_torch
@require_vision
class Pix2StructImageProcessingTestFourChannels(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = Pix2StructImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = Pix2StructImageProcessingTester(self, num_channels=4)
        self.expected_encoded_image_num_channels = 3

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processor, "do_normalize"))
        self.assertTrue(hasattr(image_processor, "do_convert_rgb"))

    def test_call_pil_four_channels(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input (the extra channel is dropped by RGB conversion)
        expected_hidden_dim = (
            (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
            * (self.image_processor_tester.num_channels - 1)
        ) + 2

        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            encoded_images = image_processor(
                image_inputs[0], return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(encoded_images.shape, (1, max_patch, expected_hidden_dim))

            # Test batched
            encoded_images = image_processor(
                image_inputs, return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape, (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim)
            )
| 298
| 1
|
def prefix_function(input_string: str) -> list:
    """Compute the Knuth-Morris-Pratt prefix function of input_string."""
    prefix_result = [0] * len(input_string)
    for i in range(1, len(input_string)):
        # use last results for better performance - dynamic programming
        j = prefix_result[i - 1]
        while j > 0 and input_string[i] != input_string[j]:
            j = prefix_result[j - 1]
        if input_string[i] == input_string[j]:
            j += 1
        prefix_result[i] = j
    return prefix_result


def longest_prefix(input_str: str) -> int:
    """Return the length of the longest border found anywhere in input_str."""
    return max(prefix_function(input_str))
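# Hedged example (my addition): for "aabaaab" the prefix function is
# [0, 1, 0, 1, 2, 2, 3], so the longest border length is 3.
assert prefix_function("aabaaab") == [0, 1, 0, 1, 2, 2, 3]
assert longest_prefix("aabaaab") == 3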
if __name__ == "__main__":
import doctest
doctest.testmod()
| 157
|
import os
import shutil
from pathlib import Path
from typing import Optional, Union
import numpy as np
from huggingface_hub import hf_hub_download
from ..utils import ONNX_EXTERNAL_WEIGHTS_NAME, ONNX_WEIGHTS_NAME, is_onnx_available, logging
if is_onnx_available():
import onnxruntime as ort
logger = logging.get_logger(__name__)

ORT_TO_NP_TYPE = {
    "tensor(bool)": np.bool_,
    "tensor(int8)": np.int8,
    "tensor(uint8)": np.uint8,
    "tensor(int16)": np.int16,
    "tensor(uint16)": np.uint16,
    "tensor(int32)": np.int32,
    "tensor(uint32)": np.uint32,
    "tensor(int64)": np.int64,
    "tensor(uint64)": np.uint64,
    "tensor(float16)": np.float16,
    "tensor(float)": np.float32,
    "tensor(double)": np.float64,
}
class OnnxRuntimeModel:
    def __init__(self, model=None, **kwargs):
        logger.info("`diffusers.OnnxRuntimeModel` is experimental and might change in the future.")
        self.model = model
        self.model_save_dir = kwargs.get("model_save_dir", None)
        self.latest_model_name = kwargs.get("latest_model_name", ONNX_WEIGHTS_NAME)

    def __call__(self, **kwargs):
        inputs = {k: np.array(v) for k, v in kwargs.items()}
        return self.model.run(None, inputs)

    @staticmethod
    def load_model(path: Union[str, Path], provider=None, sess_options=None):
        """Load an ONNX inference session with a given provider."""
        if provider is None:
            logger.info("No onnxruntime provider specified, using CPUExecutionProvider")
            provider = "CPUExecutionProvider"
        return ort.InferenceSession(path, providers=[provider], sess_options=sess_options)

    def _save_pretrained(self, save_directory: Union[str, Path], file_name: Optional[str] = None, **kwargs):
        model_file_name = file_name if file_name is not None else ONNX_WEIGHTS_NAME
        src_path = self.model_save_dir.joinpath(self.latest_model_name)
        dst_path = Path(save_directory).joinpath(model_file_name)
        try:
            shutil.copyfile(src_path, dst_path)
        except shutil.SameFileError:
            pass

        # copy external weights (for models >2GB)
        src_path = self.model_save_dir.joinpath(ONNX_EXTERNAL_WEIGHTS_NAME)
        if src_path.exists():
            dst_path = Path(save_directory).joinpath(ONNX_EXTERNAL_WEIGHTS_NAME)
            try:
                shutil.copyfile(src_path, dst_path)
            except shutil.SameFileError:
                pass

    def save_pretrained(self, save_directory: Union[str, os.PathLike], **kwargs):
        if os.path.isfile(save_directory):
            logger.error(f"Provided path ({save_directory}) should be a directory, not a file")
            return

        os.makedirs(save_directory, exist_ok=True)
        # saving model weights/files
        self._save_pretrained(save_directory, **kwargs)

    @classmethod
    def _from_pretrained(
        cls,
        model_id: Union[str, Path],
        use_auth_token: Optional[Union[bool, str, None]] = None,
        revision: Optional[Union[str, None]] = None,
        force_download: bool = False,
        cache_dir: Optional[str] = None,
        file_name: Optional[str] = None,
        provider: Optional[str] = None,
        sess_options: Optional["ort.SessionOptions"] = None,
        **kwargs,
    ):
        model_file_name = file_name if file_name is not None else ONNX_WEIGHTS_NAME
        # load model from local directory
        if os.path.isdir(model_id):
            model = OnnxRuntimeModel.load_model(
                os.path.join(model_id, model_file_name), provider=provider, sess_options=sess_options
            )
            kwargs["model_save_dir"] = Path(model_id)
        # load model from hub
        else:
            # download model
            model_cache_path = hf_hub_download(
                repo_id=model_id,
                filename=model_file_name,
                use_auth_token=use_auth_token,
                revision=revision,
                cache_dir=cache_dir,
                force_download=force_download,
            )
            kwargs["model_save_dir"] = Path(model_cache_path).parent
            kwargs["latest_model_name"] = Path(model_cache_path).name
            model = OnnxRuntimeModel.load_model(model_cache_path, provider=provider, sess_options=sess_options)
        return cls(model=model, **kwargs)

    @classmethod
    def from_pretrained(
        cls,
        model_id: Union[str, Path],
        force_download: bool = True,
        use_auth_token: Optional[str] = None,
        cache_dir: Optional[str] = None,
        **model_kwargs,
    ):
        revision = None
        if len(str(model_id).split("@")) == 2:
            model_id, revision = model_id.split("@")
        return cls._from_pretrained(
            model_id=model_id,
            revision=revision,
            cache_dir=cache_dir,
            force_download=force_download,
            use_auth_token=use_auth_token,
            **model_kwargs,
        )
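# Hypothetical usage sketch (my addition; the repo id is a placeholder):
# session_model = OnnxRuntimeModel.from_pretrained("some-org/some-onnx-model")
# outputs = session_model(input_ids=np.ones((1, 8), dtype=np.int64))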
| 157
| 1
|
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/nllb-200-distilled-600M": (
            "https://huggingface.co/facebook/nllb-200-distilled-600M/blob/main/sentencepiece.bpe.model"
        ),
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/nllb-200-distilled-600M": 1024,
}
# fmt: off
FAIRSEQ_LANGUAGE_CODES = ["ace_Arab", "ace_Latn", "acm_Arab", "acq_Arab", "aeb_Arab", "afr_Latn", "ajp_Arab", "aka_Latn", "amh_Ethi", "apc_Arab", "arb_Arab", "ars_Arab", "ary_Arab", "arz_Arab", "asm_Beng", "ast_Latn", "awa_Deva", "ayr_Latn", "azb_Arab", "azj_Latn", "bak_Cyrl", "bam_Latn", "ban_Latn", "bel_Cyrl", "bem_Latn", "ben_Beng", "bho_Deva", "bjn_Arab", "bjn_Latn", "bod_Tibt", "bos_Latn", "bug_Latn", "bul_Cyrl", "cat_Latn", "ceb_Latn", "ces_Latn", "cjk_Latn", "ckb_Arab", "crh_Latn", "cym_Latn", "dan_Latn", "deu_Latn", "dik_Latn", "dyu_Latn", "dzo_Tibt", "ell_Grek", "eng_Latn", "epo_Latn", "est_Latn", "eus_Latn", "ewe_Latn", "fao_Latn", "pes_Arab", "fij_Latn", "fin_Latn", "fon_Latn", "fra_Latn", "fur_Latn", "fuv_Latn", "gla_Latn", "gle_Latn", "glg_Latn", "grn_Latn", "guj_Gujr", "hat_Latn", "hau_Latn", "heb_Hebr", "hin_Deva", "hne_Deva", "hrv_Latn", "hun_Latn", "hye_Armn", "ibo_Latn", "ilo_Latn", "ind_Latn", "isl_Latn", "ita_Latn", "jav_Latn", "jpn_Jpan", "kab_Latn", "kac_Latn", "kam_Latn", "kan_Knda", "kas_Arab", "kas_Deva", "kat_Geor", "knc_Arab", "knc_Latn", "kaz_Cyrl", "kbp_Latn", "kea_Latn", "khm_Khmr", "kik_Latn", "kin_Latn", "kir_Cyrl", "kmb_Latn", "kon_Latn", "kor_Hang", "kmr_Latn", "lao_Laoo", "lvs_Latn", "lij_Latn", "lim_Latn", "lin_Latn", "lit_Latn", "lmo_Latn", "ltg_Latn", "ltz_Latn", "lua_Latn", "lug_Latn", "luo_Latn", "lus_Latn", "mag_Deva", "mai_Deva", "mal_Mlym", "mar_Deva", "min_Latn", "mkd_Cyrl", "plt_Latn", "mlt_Latn", "mni_Beng", "khk_Cyrl", "mos_Latn", "mri_Latn", "zsm_Latn", "mya_Mymr", "nld_Latn", "nno_Latn", "nob_Latn", "npi_Deva", "nso_Latn", "nus_Latn", "nya_Latn", "oci_Latn", "gaz_Latn", "ory_Orya", "pag_Latn", "pan_Guru", "pap_Latn", "pol_Latn", "por_Latn", "prs_Arab", "pbt_Arab", "quy_Latn", "ron_Latn", "run_Latn", "rus_Cyrl", "sag_Latn", "san_Deva", "sat_Beng", "scn_Latn", "shn_Mymr", "sin_Sinh", "slk_Latn", "slv_Latn", "smo_Latn", "sna_Latn", "snd_Arab", "som_Latn", "sot_Latn", "spa_Latn", "als_Latn", "srd_Latn", "srp_Cyrl", "ssw_Latn", "sun_Latn", "swe_Latn", "swh_Latn", "szl_Latn", "tam_Taml", "tat_Cyrl", "tel_Telu", "tgk_Cyrl", "tgl_Latn", "tha_Thai", "tir_Ethi", "taq_Latn", "taq_Tfng", "tpi_Latn", "tsn_Latn", "tso_Latn", "tuk_Latn", "tum_Latn", "tur_Latn", "twi_Latn", "tzm_Tfng", "uig_Arab", "ukr_Cyrl", "umb_Latn", "urd_Arab", "uzn_Latn", "vec_Latn", "vie_Latn", "war_Latn", "wol_Latn", "xho_Latn", "ydd_Hebr", "yor_Latn", "yue_Hant", "zho_Hans", "zho_Hant", "zul_Latn"]  # noqa: E501
class NllbTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]

    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []

    def __init__(
        self,
        vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        tokenizer_file=None,
        src_lang=None,
        tgt_lang=None,
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        additional_special_tokens=None,
        legacy_behaviour=False,
        **kwargs,
    ):
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        self.legacy_behaviour = legacy_behaviour

        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            tokenizer_file=tokenizer_file,
            src_lang=src_lang,
            tgt_lang=tgt_lang,
            additional_special_tokens=additional_special_tokens,
            sp_model_kwargs=self.sp_model_kwargs,
            legacy_behaviour=legacy_behaviour,
            **kwargs,
        )

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.vocab_file = vocab_file
        # Original fairseq vocab and spm vocab must be "aligned":
        # Vocab    |    0    |    1    |   2    |    3    |  4   |  5   |  6   |   7  |   8  |  9
        # -------- | ------- | ------- | ------ | ------- | ---- | ---- | ---- | ---- | ---- | ----
        # fairseq  | '<s>'   | '<pad>' | '</s>' | '<unk>' | 'an' | '▁n' | '▁m' | '▁t' | '▁k' | '▁a'
        # spm      | '<unk>' | '<s>'   | '</s>' | 'an'    | '▁n' | '▁m' | '▁t' | '▁k' | '▁a' | '▁s'

        # Mimic fairseq token-to-id alignment for the first 4 token
        self.fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}

        # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
        self.fairseq_offset = 1
        self.sp_model_size = len(self.sp_model)
        self.lang_code_to_id = {
            code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(FAIRSEQ_LANGUAGE_CODES)
        }
        self.id_to_lang_code = {v: k for k, v in self.lang_code_to_id.items()}
        self.fairseq_tokens_to_ids["<mask>"] = len(self.sp_model) + len(self.lang_code_to_id) + self.fairseq_offset

        self.fairseq_tokens_to_ids.update(self.lang_code_to_id)
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
        self._additional_special_tokens = list(self.lang_code_to_id.keys())

        if additional_special_tokens is not None:
            # Only add those special tokens if they are not already there.
            self._additional_special_tokens.extend(
                [t for t in additional_special_tokens if t not in self._additional_special_tokens]
            )

        self._src_lang = src_lang if src_lang is not None else "eng_Latn"
        self.cur_lang_code_id = self.lang_code_to_id[self._src_lang]
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self._src_lang)
def __getstate__( self : List[Any] )-> List[Any]:
lowerCAmelCase__ : List[str] = self.__dict__.copy()
lowerCAmelCase__ : Any = None
lowerCAmelCase__ : Optional[int] = self.sp_model.serialized_model_proto()
return state
def __setstate__( self : List[Any] , _SCREAMING_SNAKE_CASE : List[str] )-> str:
lowerCAmelCase__ : str = d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs''' ):
lowerCAmelCase__ : Union[str, Any] = {}
lowerCAmelCase__ : List[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
@property
def UpperCAmelCase__( self : Any )-> Optional[int]:
return len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset + 1 # Plus 1 for the mask token
    @property
    def src_lang(self) -> str:
        return self._src_lang

    @src_lang.setter
    def src_lang(self, new_src_lang: str) -> None:
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang)

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        prefix_ones = [1] * len(self.prefix_tokens)
        suffix_ones = [1] * len(self.suffix_tokens)
        if token_ids_1 is None:
            return prefix_ones + ([0] * len(token_ids_0)) + suffix_ones
        return prefix_ones + ([0] * len(token_ids_0)) + ([0] * len(token_ids_1)) + suffix_ones

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
    def _build_translation_inputs(
        self, raw_inputs, return_tensors: str, src_lang: Optional[str], tgt_lang: Optional[str], **extra_kwargs
    ):
        """Used by the translation pipeline, to prepare inputs for the generate function."""
        if src_lang is None or tgt_lang is None:
            raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model")
        self.src_lang = src_lang
        inputs = self(raw_inputs, add_special_tokens=True, return_tensors=return_tensors, **extra_kwargs)
        tgt_lang_id = self.convert_tokens_to_ids(tgt_lang)
        inputs["forced_bos_token_id"] = tgt_lang_id
        return inputs

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab
    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token)
        # Need to return unknown token if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset)

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (strings for sub-words) to a single string."""
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)

    def prepare_seq2seq_batch(
        self,
        src_texts: List[str],
        src_lang: str = "eng_Latn",
        tgt_texts: Optional[List[str]] = None,
        tgt_lang: str = "fra_Latn",
        **kwargs,
    ) -> BatchEncoding:
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs)

    def _switch_to_input_mode(self):
        return self.set_src_lang_special_tokens(self.src_lang)

    def _switch_to_target_mode(self):
        return self.set_tgt_lang_special_tokens(self.tgt_lang)

    def set_src_lang_special_tokens(self, src_lang) -> None:
        """Reset the special tokens to the source lang setting.
        - In legacy mode: no prefix, suffix = [eos, src_lang_code].
        - In default mode: prefix = [src_lang_code], suffix = [eos].
        """
        self.cur_lang_code = self.lang_code_to_id[src_lang]
        if self.legacy_behaviour:
            self.prefix_tokens = []
            self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        else:
            self.prefix_tokens = [self.cur_lang_code]
            self.suffix_tokens = [self.eos_token_id]

    def set_tgt_lang_special_tokens(self, lang: str) -> None:
        """Reset the special tokens to the target lang setting.
        - In legacy mode: no prefix, suffix = [eos, tgt_lang_code].
        - In default mode: prefix = [tgt_lang_code], suffix = [eos].
        """
        self.cur_lang_code = self.lang_code_to_id[lang]
        if self.legacy_behaviour:
            self.prefix_tokens = []
            self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        else:
            self.prefix_tokens = [self.cur_lang_code]
            self.suffix_tokens = [self.eos_token_id]
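# Hedged usage sketch (my addition):
# tok = NllbTokenizer.from_pretrained("facebook/nllb-200-distilled-600M", src_lang="eng_Latn")
# batch = tok("Hello world", return_tensors="pt")  # prefixed with the eng_Latn code, suffixed with </s>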
| 369
|
# using dfs for finding eulerian path traversal
def dfs(u, graph, visited_edge, path=None):
    """Walk unvisited edges from u, appending vertices to the traversal path."""
    path = (path or []) + [u]
    for v in graph[u]:
        if visited_edge[u][v] is False:
            visited_edge[u][v], visited_edge[v][u] = True, True
            path = dfs(v, graph, visited_edge, path)
    return path
def check_circuit_or_path(graph, max_node):
    """Classify the graph: 1 = Euler circuit, 2 = Euler path, 3 = neither."""
    odd_degree_nodes = 0
    odd_node = -1
    for i in range(max_node):
        if i not in graph.keys():
            continue
        if len(graph[i]) % 2 == 1:
            odd_degree_nodes += 1
            odd_node = i
    if odd_degree_nodes == 0:
        return 1, odd_node
    if odd_degree_nodes == 2:
        return 2, odd_node
    return 3, odd_node
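# Background (my note): a connected graph has an Euler circuit iff every vertex
# has even degree, and an Euler path (but no circuit) iff exactly two vertices
# have odd degree; the traversal must then start at one of the odd vertices.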
def check_euler(graph, max_node):
    visited_edge = [[False for _ in range(max_node + 1)] for _ in range(max_node + 1)]
    check, odd_node = check_circuit_or_path(graph, max_node)
    if check == 3:
        print("graph is not Eulerian")
        print("no path")
        return
    start_node = 1
    if check == 2:
        start_node = odd_node
        print("graph has a Euler path")
    if check == 1:
        print("graph has a Euler cycle")
    path = dfs(start_node, graph, visited_edge)
    print(path)


def main():
    g1 = {1: [2, 3, 4], 2: [1, 3], 3: [1, 2], 4: [1, 5], 5: [4]}
    g2 = {1: [2, 3, 4, 5], 2: [1, 3], 3: [1, 2], 4: [1, 5], 5: [1, 4]}
    g3 = {1: [2, 3, 4], 2: [1, 3, 4], 3: [1, 2], 4: [1, 2, 5], 5: [4]}
    g4 = {1: [2, 3], 2: [1, 3], 3: [1, 2]}
    g5 = {
        1: [],
        2: []
        # all degree is zero
    }
    max_node = 10
    check_euler(g1, max_node)
    check_euler(g2, max_node)
    check_euler(g3, max_node)
    check_euler(g4, max_node)
    check_euler(g5, max_node)


if __name__ == "__main__":
    main()
| 211
| 0
|
def prime_sieve_eratosthenes(num: int) -> list[int]:
    """Return all primes up to and including num via the sieve of Eratosthenes."""
    if num <= 0:
        raise ValueError("Input must be a positive integer")
    primes = [True] * (num + 1)
    p = 2
    while p * p <= num:
        if primes[p]:
            for i in range(p * p, num + 1, p):
                primes[i] = False
        p += 1
    return [prime for prime in range(2, num + 1) if primes[prime]]
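# Hedged check (my addition): the primes up to 10.
assert prime_sieve_eratosthenes(10) == [2, 3, 5, 7]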
if __name__ == "__main__":
import doctest
doctest.testmod()
UpperCAmelCase__ = int(input("Enter a positive integer: ").strip())
print(prime_sieve_eratosthenes(user_num))
| 0
|
from tempfile import TemporaryDirectory
from unittest import TestCase
from unittest.mock import MagicMock, patch
from transformers import AutoModel, TFAutoModel
from transformers.onnx import FeaturesManager
from transformers.testing_utils import SMALL_MODEL_IDENTIFIER, require_tf, require_torch
@require_torch
@require_tf
class DetermineFrameworkTest(TestCase):
    """Exercises FeaturesManager.determine_framework's resolution order."""

    def setUp(self):
        self.test_model = SMALL_MODEL_IDENTIFIER
        self.framework_pt = "pt"
        self.framework_tf = "tf"

    def _setup_pt_ckpt(self, save_dir: str):
        model_pt = AutoModel.from_pretrained(self.test_model)
        model_pt.save_pretrained(save_dir)

    def _setup_tf_ckpt(self, save_dir: str):
        model_tf = TFAutoModel.from_pretrained(self.test_model, from_pt=True)
        model_tf.save_pretrained(save_dir)

    def test_framework_provided(self):
        mock_framework = "mock_framework"

        # Framework provided - return whatever the user provides
        framework = FeaturesManager.determine_framework(self.test_model, mock_framework)
        self.assertEqual(framework, mock_framework)

        # Local checkpoint and framework provided - return provided framework
        # PyTorch checkpoint
        with TemporaryDirectory() as local_pt_ckpt:
            self._setup_pt_ckpt(local_pt_ckpt)
            framework = FeaturesManager.determine_framework(local_pt_ckpt, mock_framework)
            self.assertEqual(framework, mock_framework)

        # TensorFlow checkpoint
        with TemporaryDirectory() as local_tf_ckpt:
            self._setup_tf_ckpt(local_tf_ckpt)
            framework = FeaturesManager.determine_framework(local_tf_ckpt, mock_framework)
            self.assertEqual(framework, mock_framework)

    def test_checkpoint_provided(self):
        # PyTorch checkpoint
        with TemporaryDirectory() as local_pt_ckpt:
            self._setup_pt_ckpt(local_pt_ckpt)
            framework = FeaturesManager.determine_framework(local_pt_ckpt)
            self.assertEqual(framework, self.framework_pt)

        # TensorFlow checkpoint
        with TemporaryDirectory() as local_tf_ckpt:
            self._setup_tf_ckpt(local_tf_ckpt)
            framework = FeaturesManager.determine_framework(local_tf_ckpt)
            self.assertEqual(framework, self.framework_tf)

        # Invalid local checkpoint
        with TemporaryDirectory() as local_invalid_ckpt:
            with self.assertRaises(FileNotFoundError):
                framework = FeaturesManager.determine_framework(local_invalid_ckpt)

    def test_from_environment(self):
        # PyTorch in environment, TensorFlow not -> use PyTorch
        mock_tf_available = MagicMock(return_value=False)
        with patch("transformers.onnx.features.is_tf_available", mock_tf_available):
            framework = FeaturesManager.determine_framework(self.test_model)
            self.assertEqual(framework, self.framework_pt)

        # PyTorch not in environment -> use TensorFlow
        mock_torch_available = MagicMock(return_value=False)
        with patch("transformers.onnx.features.is_torch_available", mock_torch_available):
            framework = FeaturesManager.determine_framework(self.test_model)
            self.assertEqual(framework, self.framework_tf)

        # Both in environment -> use PyTorch
        mock_tf_available = MagicMock(return_value=True)
        mock_torch_available = MagicMock(return_value=True)
        with patch("transformers.onnx.features.is_tf_available", mock_tf_available), patch(
            "transformers.onnx.features.is_torch_available", mock_torch_available
        ):
            framework = FeaturesManager.determine_framework(self.test_model)
            self.assertEqual(framework, self.framework_pt)

        # Both not in environment -> raise error
        mock_tf_available = MagicMock(return_value=False)
        mock_torch_available = MagicMock(return_value=False)
        with patch("transformers.onnx.features.is_tf_available", mock_tf_available), patch(
            "transformers.onnx.features.is_torch_available", mock_torch_available
        ):
            with self.assertRaises(EnvironmentError):
                framework = FeaturesManager.determine_framework(self.test_model)
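# Assumed priority (as exercised above, my note): an explicit framework
# argument wins, then the checkpoint's own format, then the environment, with
# PyTorch preferred when both backends are installed.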
| 0
| 1
|
"""simple docstring"""
import multiprocessing
from typing import TYPE_CHECKING, Optional, Union
from .. import Dataset, Features, config
from ..formatting import query_table
from ..packaged_modules.sql.sql import Sql
from ..utils import logging
from .abc import AbstractDatasetInputStream
if TYPE_CHECKING:
    import sqlite3

    import sqlalchemy
class SqlDatasetReader(AbstractDatasetInputStream):
    def __init__(
        self,
        sql,
        con,
        features=None,
        cache_dir=None,
        keep_in_memory=False,
        **kwargs,
    ):
        super().__init__(features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory, **kwargs)
        self.builder = Sql(
            cache_dir=cache_dir,
            features=features,
            sql=sql,
            con=con,
            **kwargs,
        )

    def read(self):
        download_config = None
        download_mode = None
        verification_mode = None
        base_path = None

        self.builder.download_and_prepare(
            download_config=download_config,
            download_mode=download_mode,
            verification_mode=verification_mode,
            base_path=base_path,
        )

        # Build dataset for splits
        dataset = self.builder.as_dataset(
            split="train", verification_mode=verification_mode, in_memory=self.keep_in_memory
        )
        return dataset
class __a :
'''simple docstring'''
def __init__( self , _a , _a , _a , _a = None , _a = None , **_a , ) -> List[str]:
"""simple docstring"""
if num_proc is not None and num_proc <= 0:
raise ValueError(f'''num_proc {num_proc} must be an integer > 0.''' )
SCREAMING_SNAKE_CASE__ : str = dataset
SCREAMING_SNAKE_CASE__ : Any = name
SCREAMING_SNAKE_CASE__ : Dict = con
SCREAMING_SNAKE_CASE__ : Dict = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
SCREAMING_SNAKE_CASE__ : List[str] = num_proc
SCREAMING_SNAKE_CASE__ : Union[str, Any] = to_sql_kwargs
def _a ( self ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.to_sql_kwargs.pop("""sql""" , _a )
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.to_sql_kwargs.pop("""con""" , _a )
SCREAMING_SNAKE_CASE__ : List[str] = self.to_sql_kwargs.pop("""index""" , _a )
SCREAMING_SNAKE_CASE__ : Optional[int] = self._write(index=_a , **self.to_sql_kwargs )
return written
def _a ( self , _a ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Any = args
SCREAMING_SNAKE_CASE__ : List[Any] = {**to_sql_kwargs, """if_exists""": """append"""} if offset > 0 else to_sql_kwargs
SCREAMING_SNAKE_CASE__ : Union[str, Any] = query_table(
table=self.dataset.data , key=slice(_a , offset + self.batch_size ) , indices=self.dataset._indices , )
SCREAMING_SNAKE_CASE__ : Any = batch.to_pandas()
SCREAMING_SNAKE_CASE__ : Any = df.to_sql(self.name , self.con , index=_a , **_a )
return num_rows or len(_a )
def _a ( self , _a , **_a ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[int] = 0
if self.num_proc is None or self.num_proc == 1:
for offset in logging.tqdm(
range(0 , len(self.dataset ) , self.batch_size ) , unit="""ba""" , disable=not logging.is_progress_bar_enabled() , desc="""Creating SQL from Arrow format""" , ):
written += self._batch_sql((offset, index, to_sql_kwargs) )
else:
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Any = len(self.dataset ), self.batch_size
with multiprocessing.Pool(self.num_proc ) as pool:
for num_rows in logging.tqdm(
pool.imap(
self._batch_sql , [(offset, index, to_sql_kwargs) for offset in range(0 , _a , _a )] , ) , total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size , unit="""ba""" , disable=not logging.is_progress_bar_enabled() , desc="""Creating SQL from Arrow format""" , ):
written += num_rows
return written
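# Illustrative usage (a sketch, not from this file), assuming these classes
# back the public `datasets` SQL API; the table name and database path are
# hypothetical:
#   from datasets import Dataset
#   ds = Dataset.from_sql("SELECT * FROM states", "sqlite:///us_states.db")
#   ds.to_sql("states_copy", "sqlite:///us_states.db", batch_size=1000)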
| 367
|
"""simple docstring"""
def _lowercase ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) -> float:
if principal <= 0:
raise Exception("""Principal borrowed must be > 0""" )
if rate_per_annum < 0:
raise Exception("""Rate of interest must be >= 0""" )
if years_to_repay <= 0 or not isinstance(__lowerCAmelCase , __lowerCAmelCase ):
raise Exception("""Years to repay must be an integer > 0""" )
# Yearly rate is divided by 12 to get monthly rate
SCREAMING_SNAKE_CASE__ : Union[str, Any] = rate_per_annum / 12
# Years to repay is multiplied by 12 to get number of payments as payment is monthly
SCREAMING_SNAKE_CASE__ : int = years_to_repay * 12
return (
principal
* rate_per_month
* (1 + rate_per_month) ** number_of_payments
/ ((1 + rate_per_month) ** number_of_payments - 1)
)
if __name__ == "__main__":
import doctest
doctest.testmod()
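# Worked example (hedged arithmetic, not part of the original doctests):
# for a principal of 25_000 at an 8% annual rate over 10 years, the monthly
# rate is 0.08 / 12 and the payment count is 120, giving an EMI of roughly
# 303.32 per month from the formula above.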
| 56
| 0
|
def lowerCamelCase ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = 0 , SCREAMING_SNAKE_CASE = 0 ):
'''simple docstring'''
__UpperCamelCase :List[Any] = right or len(SCREAMING_SNAKE_CASE ) - 1
if left > right:
return -1
elif list_data[left] == key:
return left
elif list_data[right] == key:
return right
else:
return search(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , left + 1 , right - 1 )
if __name__ == "__main__":
import doctest
doctest.testmod()
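# Illustrative behaviour (hedged): the function checks both ends of the
# current window and recurses inward, so search([1, 2, 3, 4], 3) finds the
# key at the right end of the shrunken window and returns index 2.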
| 43
|
"""simple docstring"""
a :dict[tuple[int, int, int], int] = {}
def _lowercase ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) -> int:
# if we are absent twice, or late 3 consecutive days,
# no further prize strings are possible
if late == 3 or absent == 2:
return 0
# if we have no days left, and have not failed any other rules,
# we have a prize string
if days == 0:
return 1
# No easy solution, so now we need to do the recursive calculation
# First, check if the combination is already in the cache, and
# if yes, return the stored value from there since we already
# know the number of possible prize strings from this point on
SCREAMING_SNAKE_CASE__ : str = (days, absent, late)
if key in cache:
return cache[key]
# now we calculate the three possible ways that can unfold from
# this point on, depending on our attendance today
# 1) if we are late (but not absent), the "absent" counter stays as
# it is, but the "late" counter increases by one
SCREAMING_SNAKE_CASE__ : Tuple = _calculate(days - 1 , __lowerCAmelCase , late + 1 )
# 2) if we are absent, the "absent" counter increases by 1, and the
# "late" counter resets to 0
SCREAMING_SNAKE_CASE__ : List[str] = _calculate(days - 1 , absent + 1 , 0 )
# 3) if we are on time, this resets the "late" counter and keeps the
# absent counter
SCREAMING_SNAKE_CASE__ : Optional[Any] = _calculate(days - 1 , __lowerCAmelCase , 0 )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = state_late + state_absent + state_ontime
SCREAMING_SNAKE_CASE__ : Optional[int] = prizestrings
return prizestrings
def _lowercase ( __lowerCAmelCase = 30 ) -> int:
return _calculate(__lowerCAmelCase , absent=0 , late=0 )
if __name__ == "__main__":
print(solution())
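# Sanity check from the Project Euler 191 statement: over a 4-day period
# there are exactly 43 prize strings, so solution(4) should return 43.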
| 132
| 0
|
'''simple docstring'''
import os
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, Optional, Union
import numpy as np
import pyarrow as pa
from .. import config
from ..download.streaming_download_manager import xopen, xsplitext
from ..table import array_cast
from ..utils.py_utils import no_op_if_value_is_null, string_to_dict
if TYPE_CHECKING:
from .features import FeatureType
_lowercase : Tuple = False, False, False
@dataclass
class __magic_name__ :
UpperCamelCase__ = None
UpperCamelCase__ = True
UpperCamelCase__ = True
UpperCamelCase__ = None
# Automatically constructed
UpperCamelCase__ = '''dict'''
UpperCamelCase__ = pa.struct({'''bytes''': pa.binary(), '''path''': pa.string()})
UpperCamelCase__ = field(default='''Audio''', init=_UpperCAmelCase, repr=_UpperCAmelCase)
def __call__( self : List[Any] ):
return self.pa_type
def SCREAMING_SNAKE_CASE_ ( self : int , lowercase_ : Union[str, bytes, dict] ):
try:
import soundfile as sf # soundfile is a dependency of librosa, needed to decode audio files.
except ImportError as err:
raise ImportError("""To support encoding audio data, please install 'soundfile'.""" ) from err
if isinstance(lowercase_ , lowercase_ ):
return {"bytes": None, "path": value}
elif isinstance(lowercase_ , lowercase_ ):
return {"bytes": value, "path": None}
elif "array" in value:
# convert the audio array to wav bytes
lowercase_ : List[str] = BytesIO()
sf.write(lowercase_ , value["""array"""] , value["""sampling_rate"""] , format="""wav""" )
return {"bytes": buffer.getvalue(), "path": None}
elif value.get("""path""" ) is not None and os.path.isfile(value["""path"""] ):
# we set "bytes": None to not duplicate the data if they're already available locally
if value["path"].endswith("""pcm""" ):
# "PCM" only has raw audio bytes
if value.get("""sampling_rate""" ) is None:
# At least, If you want to convert "PCM-byte" to "WAV-byte", you have to know sampling rate
raise KeyError("""To use PCM files, please specify a 'sampling_rate' in Audio object""" )
if value.get("""bytes""" ):
# If we already had PCM-byte, we don`t have to make "read file, make bytes" (just use it!)
lowercase_ : int = np.frombuffer(value["""bytes"""] , dtype=np.int16 ).astype(np.float32 ) / 32767
else:
lowercase_ : Tuple = np.memmap(value["""path"""] , dtype="""h""" , mode="""r""" ).astype(np.float32 ) / 32767
lowercase_ : Tuple = BytesIO(bytes() )
sf.write(lowercase_ , lowercase_ , value["""sampling_rate"""] , format="""wav""" )
return {"bytes": buffer.getvalue(), "path": None}
else:
return {"bytes": None, "path": value.get("""path""" )}
elif value.get("""bytes""" ) is not None or value.get("""path""" ) is not None:
# store the audio bytes, and path is used to infer the audio format using the file extension
return {"bytes": value.get("""bytes""" ), "path": value.get("""path""" )}
else:
raise ValueError(
f'''An audio sample should have one of \'path\' or \'bytes\' but they are missing or None in {value}.''' )
def SCREAMING_SNAKE_CASE_ ( self : Any , lowercase_ : dict , lowercase_ : Optional[Dict[str, Union[str, bool, None]]] = None ):
if not self.decode:
raise RuntimeError("""Decoding is disabled for this feature. Please use Audio(decode=True) instead.""" )
lowercase_ : str = (value["""path"""], BytesIO(value["""bytes"""] )) if value["""bytes"""] is not None else (value["""path"""], None)
if path is None and file is None:
raise ValueError(f'''An audio sample should have one of \'path\' or \'bytes\' but both are None in {value}.''' )
try:
import librosa
import soundfile as sf
except ImportError as err:
raise ImportError("""To support decoding audio files, please install 'librosa' and 'soundfile'.""" ) from err
lowercase_ : str = xsplitext(lowercase_ )[1][1:].lower() if path is not None else None
if not config.IS_OPUS_SUPPORTED and audio_format == "opus":
raise RuntimeError(
"""Decoding 'opus' files requires system library 'libsndfile'>=1.0.31, """
"""You can try to update `soundfile` python library: `pip install \"soundfile>=0.12.1\"`. """ )
elif not config.IS_MP3_SUPPORTED and audio_format == "mp3":
raise RuntimeError(
"""Decoding 'mp3' files requires system library 'libsndfile'>=1.1.0, """
"""You can try to update `soundfile` python library: `pip install \"soundfile>=0.12.1\"`. """ )
if file is None:
lowercase_ : Union[str, Any] = token_per_repo_id or {}
lowercase_ : Optional[Any] = path.split("""::""" )[-1]
try:
lowercase_ : Optional[int] = string_to_dict(lowercase_ , config.HUB_DATASETS_URL )["""repo_id"""]
lowercase_ : Optional[Any] = token_per_repo_id[repo_id]
except (ValueError, KeyError):
lowercase_ : int = None
with xopen(lowercase_ , """rb""" , use_auth_token=lowercase_ ) as f:
lowercase_ : Optional[int] = sf.read(lowercase_ )
else:
lowercase_ : List[str] = sf.read(lowercase_ )
lowercase_ : int = array.T
if self.mono:
lowercase_ : List[Any] = librosa.to_mono(lowercase_ )
if self.sampling_rate and self.sampling_rate != sampling_rate:
lowercase_ : Any = librosa.resample(lowercase_ , orig_sr=lowercase_ , target_sr=self.sampling_rate )
lowercase_ : List[Any] = self.sampling_rate
return {"path": path, "array": array, "sampling_rate": sampling_rate}
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ):
from .features import Value
if self.decode:
raise ValueError("""Cannot flatten a decoded Audio feature.""" )
return {
"bytes": Value("""binary""" ),
"path": Value("""string""" ),
}
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] , lowercase_ : Union[pa.StringArray, pa.StructArray] ):
if pa.types.is_string(storage.type ):
lowercase_ : Optional[Any] = pa.array([None] * len(lowercase_ ) , type=pa.binary() )
lowercase_ : List[Any] = pa.StructArray.from_arrays([bytes_array, storage] , ["""bytes""", """path"""] , mask=storage.is_null() )
elif pa.types.is_binary(storage.type ):
lowercase_ : Optional[Any] = pa.array([None] * len(lowercase_ ) , type=pa.string() )
lowercase_ : Dict = pa.StructArray.from_arrays([storage, path_array] , ["""bytes""", """path"""] , mask=storage.is_null() )
elif pa.types.is_struct(storage.type ) and storage.type.get_all_field_indices("""array""" ):
lowercase_ : Optional[Any] = pa.array([Audio().encode_example(lowercase_ ) if x is not None else None for x in storage.to_pylist()] )
elif pa.types.is_struct(storage.type ):
if storage.type.get_field_index("""bytes""" ) >= 0:
lowercase_ : Optional[int] = storage.field("""bytes""" )
else:
lowercase_ : int = pa.array([None] * len(lowercase_ ) , type=pa.binary() )
if storage.type.get_field_index("""path""" ) >= 0:
lowercase_ : Any = storage.field("""path""" )
else:
lowercase_ : List[str] = pa.array([None] * len(lowercase_ ) , type=pa.string() )
lowercase_ : Any = pa.StructArray.from_arrays([bytes_array, path_array] , ["""bytes""", """path"""] , mask=storage.is_null() )
return array_cast(lowercase_ , self.pa_type )
def SCREAMING_SNAKE_CASE_ ( self : Dict , lowercase_ : pa.StructArray ):
@no_op_if_value_is_null
def path_to_bytes(lowercase_ : str ):
with xopen(lowercase_ , """rb""" ) as f:
lowercase_ : Optional[Any] = f.read()
return bytes_
lowercase_ : Any = pa.array(
[
(path_to_bytes(x["""path"""] ) if x["""bytes"""] is None else x["""bytes"""]) if x is not None else None
for x in storage.to_pylist()
] , type=pa.binary() , )
lowercase_ : Any = pa.array(
[os.path.basename(lowercase_ ) if path is not None else None for path in storage.field("""path""" ).to_pylist()] , type=pa.string() , )
lowercase_ : List[Any] = pa.StructArray.from_arrays([bytes_array, path_array] , ["""bytes""", """path"""] , mask=bytes_array.is_null() )
return array_cast(lowercase_ , self.pa_type )
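# Illustrative usage (a sketch, assuming this mirrors `datasets.features.Audio`):
#   from datasets import Features
#   features = Features({"audio": Audio(sampling_rate=16_000)})
#   ds = ds.cast_column("audio", Audio(sampling_rate=16_000))  # decode + resample on access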
| 357
|
'''simple docstring'''
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_VISION_2_SEQ_MAPPING
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_VISION_2_SEQ_MAPPING
_lowercase : str = logging.get_logger(__name__)
@add_end_docstrings(_UpperCAmelCase)
class __magic_name__ ( _UpperCAmelCase):
def __init__( self : str , *lowercase_ : Dict , **lowercase_ : List[Any] ):
super().__init__(*lowercase_ , **lowercase_ )
requires_backends(self , """vision""" )
self.check_model_type(
TF_MODEL_FOR_VISION_2_SEQ_MAPPING if self.framework == """tf""" else MODEL_FOR_VISION_2_SEQ_MAPPING )
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] , lowercase_ : str=None , lowercase_ : List[Any]=None , lowercase_ : Dict=None ):
lowercase_ : Optional[Any] = {}
lowercase_ : Tuple = {}
if prompt is not None:
lowercase_ : Tuple = prompt
if generate_kwargs is not None:
lowercase_ : List[str] = generate_kwargs
if max_new_tokens is not None:
if "generate_kwargs" not in forward_kwargs:
lowercase_ : List[Any] = {}
if "max_new_tokens" in forward_kwargs["generate_kwargs"]:
raise ValueError(
"""'max_new_tokens' is defined twice, once in 'generate_kwargs' and once as a direct parameter,"""
""" please use only one""" )
lowercase_ : str = max_new_tokens
return preprocess_params, forward_kwargs, {}
def __call__( self : List[Any] , lowercase_ : Union[str, List[str], "Image.Image", List["Image.Image"]] , **lowercase_ : Optional[int] ):
return super().__call__(lowercase_ , **lowercase_ )
def SCREAMING_SNAKE_CASE_ ( self : str , lowercase_ : List[Any] , lowercase_ : Tuple=None ):
lowercase_ : List[Any] = load_image(lowercase_ )
if prompt is not None:
if not isinstance(lowercase_ , lowercase_ ):
raise ValueError(
f'''Received an invalid text input, got - {type(lowercase_ )} - but expected a single string. '''
"""Note also that one single text can be provided for conditional image to text generation.""" )
lowercase_ : List[Any] = self.model.config.model_type
if model_type == "git":
lowercase_ : Dict = self.image_processor(images=lowercase_ , return_tensors=self.framework )
lowercase_ : Union[str, Any] = self.tokenizer(text=lowercase_ , add_special_tokens=lowercase_ ).input_ids
lowercase_ : int = [self.tokenizer.cls_token_id] + input_ids
lowercase_ : List[Any] = torch.tensor(lowercase_ ).unsqueeze(0 )
model_inputs.update({"""input_ids""": input_ids} )
elif model_type == "pix2struct":
lowercase_ : Union[str, Any] = self.image_processor(images=lowercase_ , header_text=lowercase_ , return_tensors=self.framework )
elif model_type != "vision-encoder-decoder":
# vision-encoder-decoder does not support conditional generation
lowercase_ : Dict = self.image_processor(images=lowercase_ , return_tensors=self.framework )
lowercase_ : List[str] = self.tokenizer(lowercase_ , return_tensors=self.framework )
model_inputs.update(lowercase_ )
else:
raise ValueError(f'''Model type {model_type} does not support conditional text generation''' )
else:
lowercase_ : List[str] = self.image_processor(images=lowercase_ , return_tensors=self.framework )
if self.model.config.model_type == "git" and prompt is None:
lowercase_ : str = None
return model_inputs
def SCREAMING_SNAKE_CASE_ ( self : Tuple , lowercase_ : Dict , lowercase_ : Optional[Any]=None ):
# Git model sets `model_inputs["input_ids"] = None` in `preprocess` (when `prompt=None`). In batch model, the
# pipeline will group them into a list of `None`, which fail `_forward`. Avoid this by checking it first.
if (
"input_ids" in model_inputs
and isinstance(model_inputs["""input_ids"""] , lowercase_ )
and all(x is None for x in model_inputs["""input_ids"""] )
):
lowercase_ : Any = None
if generate_kwargs is None:
lowercase_ : Optional[Any] = {}
# FIXME: We need to pop here due to a difference in how `generation.py` and `generation.tf_utils.py`
# parse inputs. In the Tensorflow version, `generate` raises an error if we don't use `input_ids` whereas
# the PyTorch version matches it with `self.model.main_input_name` or `self.model.encoder.main_input_name`
# in the `_prepare_model_inputs` method.
lowercase_ : Dict = model_inputs.pop(self.model.main_input_name )
lowercase_ : Any = self.model.generate(lowercase_ , **lowercase_ , **lowercase_ )
return model_outputs
def SCREAMING_SNAKE_CASE_ ( self : List[str] , lowercase_ : List[Any] ):
lowercase_ : List[str] = []
for output_ids in model_outputs:
lowercase_ : Union[str, Any] = {
"""generated_text""": self.tokenizer.decode(
lowercase_ , skip_special_tokens=lowercase_ , )
}
records.append(lowercase_ )
return records
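# Illustrative usage (hedged; the checkpoint name is an example, not taken
# from this file):
#   from transformers import pipeline
#   captioner = pipeline("image-to-text", model="nlpconnect/vit-gpt2-image-captioning")
#   captioner("photo.jpg")  # -> [{"generated_text": "..."}]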
| 21
| 0
|
'''simple docstring'''
import unittest
from transformers import load_tool
from .test_tools_common import ToolTesterMixin
class A ( unittest.TestCase , SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
def a_ (self ) -> Optional[int]:
__UpperCamelCase : Any = load_tool("text-classification" )
self.tool.setup()
__UpperCamelCase : List[str] = load_tool("text-classification" , remote=_UpperCAmelCase )
def a_ (self ) -> Any:
__UpperCamelCase : Dict = self.tool("That's quite cool" , ["positive", "negative"] )
self.assertEqual(_UpperCAmelCase , "positive" )
def a_ (self ) -> List[Any]:
__UpperCamelCase : Optional[int] = self.remote_tool("That's quite cool" , ["positive", "negative"] )
self.assertEqual(_UpperCAmelCase , "positive" )
def a_ (self ) -> str:
__UpperCamelCase : Optional[int] = self.tool(text="That's quite cool" , labels=["positive", "negative"] )
self.assertEqual(_UpperCAmelCase , "positive" )
def a_ (self ) -> List[Any]:
__UpperCamelCase : str = self.remote_tool(text="That's quite cool" , labels=["positive", "negative"] )
self.assertEqual(_UpperCAmelCase , "positive" )
| 298
|
'''simple docstring'''
import argparse
import json
import logging
import os
import shutil
import sys
import tempfile
import unittest
from unittest import mock
import torch
from accelerate.utils import write_basic_config
from transformers.testing_utils import TestCasePlus, get_gpu_count, run_command, slow, torch_device
from transformers.utils import is_apex_available
logging.basicConfig(level=logging.DEBUG)
_lowerCAmelCase = logging.getLogger()
def __lowerCAmelCase ( ):
__UpperCamelCase : List[str] = argparse.ArgumentParser()
parser.add_argument("-f" )
__UpperCamelCase : Any = parser.parse_args()
return args.f
def __lowerCAmelCase ( snake_case__ ):
__UpperCamelCase : Dict = {}
__UpperCamelCase : Dict = os.path.join(snake_case__ , "all_results.json" )
if os.path.exists(snake_case__ ):
with open(snake_case__ , "r" ) as f:
__UpperCamelCase : Any = json.load(snake_case__ )
else:
raise ValueError(F"can't find {path}" )
return results
def __lowerCAmelCase ( ):
__UpperCamelCase : Any = torch.cuda.is_available() and torch_device == "cuda"
return is_using_cuda and is_apex_available()
_lowerCAmelCase = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class A ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
@classmethod
def a_ (cls ) -> Union[str, Any]:
# Write Accelerate config, will pick up on CPU, GPU, and multi-GPU
__UpperCamelCase : Optional[Any] = tempfile.mkdtemp()
__UpperCamelCase : List[str] = os.path.join(cls.tmpdir , "default_config.yml" )
write_basic_config(save_location=cls.configPath )
__UpperCamelCase : Optional[Any] = ["accelerate", "launch", "--config_file", cls.configPath]
@classmethod
def a_ (cls ) -> Union[str, Any]:
shutil.rmtree(cls.tmpdir )
@mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} )
def a_ (self ) -> Optional[int]:
__UpperCamelCase : List[Any] = self.get_auto_remove_tmp_dir()
__UpperCamelCase : List[Any] = f"\n {self.examples_dir}/pytorch/text-classification/run_glue_no_trainer.py\n --model_name_or_path distilbert-base-uncased\n --output_dir {tmp_dir}\n --train_file ./tests/fixtures/tests_samples/MRPC/train.csv\n --validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --learning_rate=1e-4\n --seed=42\n --checkpointing_steps epoch\n --with_tracking\n ".split()
if is_cuda_and_apex_available():
testargs.append("--fp16" )
run_command(self._launch_args + testargs )
__UpperCamelCase : Tuple = get_results(_UpperCAmelCase )
self.assertGreaterEqual(result["eval_accuracy"] , 0.75 )
self.assertTrue(os.path.exists(os.path.join(_UpperCAmelCase , "epoch_0" ) ) )
self.assertTrue(os.path.exists(os.path.join(_UpperCAmelCase , "glue_no_trainer" ) ) )
@mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} )
def a_ (self ) -> Dict:
__UpperCamelCase : Optional[Any] = self.get_auto_remove_tmp_dir()
__UpperCamelCase : List[Any] = f"\n {self.examples_dir}/pytorch/language-modeling/run_clm_no_trainer.py\n --model_name_or_path distilgpt2\n --train_file ./tests/fixtures/sample_text.txt\n --validation_file ./tests/fixtures/sample_text.txt\n --block_size 128\n --per_device_train_batch_size 5\n --per_device_eval_batch_size 5\n --num_train_epochs 2\n --output_dir {tmp_dir}\n --checkpointing_steps epoch\n --with_tracking\n ".split()
if torch.cuda.device_count() > 1:
# Skipping because there are not enough batches to train the model + would need a drop_last to work.
return
run_command(self._launch_args + testargs )
__UpperCamelCase : int = get_results(_UpperCAmelCase )
self.assertLess(result["perplexity"] , 1_0_0 )
self.assertTrue(os.path.exists(os.path.join(_UpperCAmelCase , "epoch_0" ) ) )
self.assertTrue(os.path.exists(os.path.join(_UpperCAmelCase , "clm_no_trainer" ) ) )
@mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} )
def a_ (self ) -> Any:
__UpperCamelCase : List[Any] = self.get_auto_remove_tmp_dir()
__UpperCamelCase : Optional[Any] = f"\n {self.examples_dir}/pytorch/language-modeling/run_mlm_no_trainer.py\n --model_name_or_path distilroberta-base\n --train_file ./tests/fixtures/sample_text.txt\n --validation_file ./tests/fixtures/sample_text.txt\n --output_dir {tmp_dir}\n --num_train_epochs=1\n --checkpointing_steps epoch\n --with_tracking\n ".split()
run_command(self._launch_args + testargs )
__UpperCamelCase : Optional[Any] = get_results(_UpperCAmelCase )
self.assertLess(result["perplexity"] , 4_2 )
self.assertTrue(os.path.exists(os.path.join(_UpperCAmelCase , "epoch_0" ) ) )
self.assertTrue(os.path.exists(os.path.join(_UpperCAmelCase , "mlm_no_trainer" ) ) )
@mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} )
def a_ (self ) -> int:
# with so little data distributed training needs more epochs to get the score on par with 0/1 gpu
__UpperCamelCase : int = 7 if get_gpu_count() > 1 else 2
__UpperCamelCase : int = self.get_auto_remove_tmp_dir()
__UpperCamelCase : str = f"\n {self.examples_dir}/pytorch/token-classification/run_ner_no_trainer.py\n --model_name_or_path bert-base-uncased\n --train_file tests/fixtures/tests_samples/conll/sample.json\n --validation_file tests/fixtures/tests_samples/conll/sample.json\n --output_dir {tmp_dir}\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=2\n --num_train_epochs={epochs}\n --seed 7\n --checkpointing_steps epoch\n --with_tracking\n ".split()
run_command(self._launch_args + testargs )
__UpperCamelCase : List[Any] = get_results(_UpperCAmelCase )
self.assertGreaterEqual(result["eval_accuracy"] , 0.75 )
self.assertLess(result["train_loss"] , 0.5 )
self.assertTrue(os.path.exists(os.path.join(_UpperCAmelCase , "epoch_0" ) ) )
self.assertTrue(os.path.exists(os.path.join(_UpperCAmelCase , "ner_no_trainer" ) ) )
@unittest.skip(reason="Fix me @muellerzr" )
@mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} )
def a_ (self ) -> Any:
__UpperCamelCase : Tuple = self.get_auto_remove_tmp_dir()
__UpperCamelCase : str = f"\n {self.examples_dir}/pytorch/question-answering/run_qa_no_trainer.py\n --model_name_or_path bert-base-uncased\n --version_2_with_negative\n --train_file tests/fixtures/tests_samples/SQUAD/sample.json\n --validation_file tests/fixtures/tests_samples/SQUAD/sample.json\n --output_dir {tmp_dir}\n --seed=42\n --max_train_steps=10\n --num_warmup_steps=2\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --checkpointing_steps epoch\n --with_tracking\n ".split()
run_command(self._launch_args + testargs )
__UpperCamelCase : Optional[int] = get_results(_UpperCAmelCase )
# Because we use --version_2_with_negative the testing script uses SQuAD v2 metrics.
self.assertGreaterEqual(result["eval_f1"] , 2_8 )
self.assertGreaterEqual(result["eval_exact"] , 2_8 )
self.assertTrue(os.path.exists(os.path.join(_UpperCAmelCase , "epoch_0" ) ) )
self.assertTrue(os.path.exists(os.path.join(_UpperCAmelCase , "qa_no_trainer" ) ) )
@mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} )
def a_ (self ) -> Dict:
__UpperCamelCase : Tuple = self.get_auto_remove_tmp_dir()
__UpperCamelCase : List[str] = f"\n {self.examples_dir}/pytorch/multiple-choice/run_swag_no_trainer.py\n --model_name_or_path bert-base-uncased\n --train_file tests/fixtures/tests_samples/swag/sample.json\n --validation_file tests/fixtures/tests_samples/swag/sample.json\n --output_dir {tmp_dir}\n --max_train_steps=20\n --num_warmup_steps=2\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --with_tracking\n ".split()
run_command(self._launch_args + testargs )
__UpperCamelCase : Tuple = get_results(_UpperCAmelCase )
self.assertGreaterEqual(result["eval_accuracy"] , 0.8 )
self.assertTrue(os.path.exists(os.path.join(_UpperCAmelCase , "swag_no_trainer" ) ) )
@slow
@mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} )
def a_ (self ) -> Union[str, Any]:
__UpperCamelCase : str = self.get_auto_remove_tmp_dir()
__UpperCamelCase : Dict = f"\n {self.examples_dir}/pytorch/summarization/run_summarization_no_trainer.py\n --model_name_or_path t5-small\n --train_file tests/fixtures/tests_samples/xsum/sample.json\n --validation_file tests/fixtures/tests_samples/xsum/sample.json\n --output_dir {tmp_dir}\n --max_train_steps=50\n --num_warmup_steps=8\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --checkpointing_steps epoch\n --with_tracking\n ".split()
run_command(self._launch_args + testargs )
__UpperCamelCase : Dict = get_results(_UpperCAmelCase )
self.assertGreaterEqual(result["eval_rouge1"] , 1_0 )
self.assertGreaterEqual(result["eval_rouge2"] , 2 )
self.assertGreaterEqual(result["eval_rougeL"] , 7 )
self.assertGreaterEqual(result["eval_rougeLsum"] , 7 )
self.assertTrue(os.path.exists(os.path.join(_UpperCAmelCase , "epoch_0" ) ) )
self.assertTrue(os.path.exists(os.path.join(_UpperCAmelCase , "summarization_no_trainer" ) ) )
@slow
@mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} )
def a_ (self ) -> Tuple:
__UpperCamelCase : Optional[int] = self.get_auto_remove_tmp_dir()
__UpperCamelCase : List[Any] = f"\n {self.examples_dir}/pytorch/translation/run_translation_no_trainer.py\n --model_name_or_path sshleifer/student_marian_en_ro_6_1\n --source_lang en\n --target_lang ro\n --train_file tests/fixtures/tests_samples/wmt16/sample.json\n --validation_file tests/fixtures/tests_samples/wmt16/sample.json\n --output_dir {tmp_dir}\n --max_train_steps=50\n --num_warmup_steps=8\n --num_beams=6\n --learning_rate=3e-3\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --source_lang en_XX\n --target_lang ro_RO\n --checkpointing_steps epoch\n --with_tracking\n ".split()
run_command(self._launch_args + testargs )
__UpperCamelCase : List[Any] = get_results(_UpperCAmelCase )
self.assertGreaterEqual(result["eval_bleu"] , 3_0 )
self.assertTrue(os.path.exists(os.path.join(_UpperCAmelCase , "epoch_0" ) ) )
self.assertTrue(os.path.exists(os.path.join(_UpperCAmelCase , "translation_no_trainer" ) ) )
@slow
def a_ (self ) -> List[Any]:
__UpperCamelCase : Tuple = logging.StreamHandler(sys.stdout )
logger.addHandler(_UpperCAmelCase )
__UpperCamelCase : Dict = self.get_auto_remove_tmp_dir()
__UpperCamelCase : List[Any] = f"\n {self.examples_dir}/pytorch/semantic-segmentation/run_semantic_segmentation_no_trainer.py\n --dataset_name huggingface/semantic-segmentation-test-sample\n --output_dir {tmp_dir}\n --max_train_steps=10\n --num_warmup_steps=2\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --checkpointing_steps epoch\n ".split()
run_command(self._launch_args + testargs )
__UpperCamelCase : Optional[int] = get_results(_UpperCAmelCase )
self.assertGreaterEqual(result["eval_overall_accuracy"] , 0.10 )
@mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} )
def a_ (self ) -> Tuple:
__UpperCamelCase : List[Any] = self.get_auto_remove_tmp_dir()
__UpperCamelCase : Optional[Any] = f"\n {self.examples_dir}/pytorch/image-classification/run_image_classification_no_trainer.py\n --model_name_or_path google/vit-base-patch16-224-in21k\n --dataset_name hf-internal-testing/cats_vs_dogs_sample\n --learning_rate 1e-4\n --per_device_train_batch_size 2\n --per_device_eval_batch_size 1\n --max_train_steps 2\n --train_val_split 0.1\n --seed 42\n --output_dir {tmp_dir}\n --with_tracking\n --checkpointing_steps 1\n ".split()
if is_cuda_and_apex_available():
testargs.append("--fp16" )
run_command(self._launch_args + testargs )
__UpperCamelCase : str = get_results(_UpperCAmelCase )
# The base model scores a 25%
self.assertGreaterEqual(result["eval_accuracy"] , 0.6 )
self.assertTrue(os.path.exists(os.path.join(_UpperCAmelCase , "step_1" ) ) )
self.assertTrue(os.path.exists(os.path.join(_UpperCAmelCase , "image_classification_no_trainer" ) ) )
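# Each test above shells out to `accelerate launch --config_file <tmp config>`
# with the assembled argument list, then reads `all_results.json` from the
# output directory via `get_results` to assert on the final metrics.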
| 298
| 1
|
"""simple docstring"""
from random import randint
from tempfile import TemporaryFile
import numpy as np
def lowercase_ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> Any:
lowerCAmelCase__ : Tuple = 0
if start < end:
lowerCAmelCase__ : Any = randint(__UpperCAmelCase , __UpperCAmelCase )
lowerCAmelCase__ : str = a[end]
lowerCAmelCase__ : Tuple = a[pivot]
lowerCAmelCase__ : Any = temp
lowerCAmelCase__ , lowerCAmelCase__ : Union[str, Any] = _in_place_partition(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
count += _in_place_quick_sort(__UpperCAmelCase , __UpperCAmelCase , p - 1 )
count += _in_place_quick_sort(__UpperCAmelCase , p + 1 , __UpperCAmelCase )
return count
def lowercase_ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> str:
lowerCAmelCase__ : List[str] = 0
lowerCAmelCase__ : List[str] = randint(__UpperCAmelCase , __UpperCAmelCase )
lowerCAmelCase__ : List[Any] = a[end]
lowerCAmelCase__ : Optional[Any] = a[pivot]
lowerCAmelCase__ : Optional[Any] = temp
lowerCAmelCase__ : List[Any] = start - 1
for index in range(__UpperCAmelCase , __UpperCAmelCase ):
count += 1
if a[index] < a[end]: # check if current val is less than pivot value
lowerCAmelCase__ : Union[str, Any] = new_pivot_index + 1
lowerCAmelCase__ : Dict = a[new_pivot_index]
lowerCAmelCase__ : Optional[int] = a[index]
lowerCAmelCase__ : Dict = temp
lowerCAmelCase__ : Any = a[new_pivot_index + 1]
lowerCAmelCase__ : Optional[Any] = a[end]
lowerCAmelCase__ : Tuple = temp
return new_pivot_index + 1, count
_A = TemporaryFile()
_A = 1_0_0 # 100 elements are to be sorted
_A , _A = 0, 1 # mean and standard deviation
_A = np.random.normal(mu, sigma, p)
np.save(outfile, X)
print("""The array is""")
print(X)
outfile.seek(0) # using the same array
_A = np.load(outfile)
_A = len(M) - 1
_A = _in_place_quick_sort(M, 0, r)
print(
"""No of Comparisons for 100 elements selected from a standard normal distribution"""
"""is :"""
)
print(z)
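# Rough expectation (hedged): randomized quicksort performs about
# 2 * n * ln(n) comparisons on average, so the count printed for the
# 100 elements sorted here should typically land on the order of 900.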
| 212
|
"""simple docstring"""
import shutil
import tempfile
import unittest
from transformers import (
SPIECE_UNDERLINE,
AddedToken,
BatchEncoding,
NllbTokenizer,
NllbTokenizerFast,
is_torch_available,
)
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
_A = get_tests_dir("""fixtures/test_sentencepiece.model""")
if is_torch_available():
from transformers.models.m2m_100.modeling_m2m_100 import shift_tokens_right
_A = 2_5_6_0_4_7
_A = 2_5_6_1_4_5
@require_sentencepiece
@require_tokenizers
class _lowerCamelCase ( a_ , unittest.TestCase ):
_lowerCamelCase :Any = NllbTokenizer
_lowerCamelCase :Dict = NllbTokenizerFast
_lowerCamelCase :str = True
_lowerCamelCase :Optional[Any] = True
_lowerCamelCase :Union[str, Any] = {}
def _lowerCAmelCase ( self : List[str] ) -> int:
"""simple docstring"""
super().setUp()
# We have a SentencePiece fixture for testing
lowerCAmelCase__ : Optional[int] = NllbTokenizer(UpperCamelCase , keep_accents=UpperCamelCase )
tokenizer.save_pretrained(self.tmpdirname )
def _lowerCAmelCase ( self : Optional[Any] ) -> List[Any]:
"""simple docstring"""
lowerCAmelCase__ : Dict = NllbTokenizer(UpperCamelCase , keep_accents=UpperCamelCase )
lowerCAmelCase__ : List[str] = tokenizer.tokenize("""This is a test""" )
self.assertListEqual(UpperCamelCase , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(UpperCamelCase ) , [value + tokenizer.fairseq_offset for value in [2_85, 46, 10, 1_70, 3_82]] , )
lowerCAmelCase__ : Dict = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
self.assertListEqual(
UpperCamelCase , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""é""",
""".""",
] , )
lowerCAmelCase__ : Optional[Any] = tokenizer.convert_tokens_to_ids(UpperCamelCase )
self.assertListEqual(
UpperCamelCase , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
lowerCAmelCase__ : List[str] = tokenizer.convert_ids_to_tokens(UpperCamelCase )
self.assertListEqual(
UpperCamelCase , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""<unk>""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""<unk>""",
""".""",
] , )
def _lowerCAmelCase ( self : Any ) -> Any:
"""simple docstring"""
lowerCAmelCase__ : str = (self.rust_tokenizer_class, """hf-internal-testing/tiny-random-nllb""", {})
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
lowerCAmelCase__ : Union[str, Any] = self.rust_tokenizer_class.from_pretrained(UpperCamelCase , **UpperCamelCase )
lowerCAmelCase__ : str = self.tokenizer_class.from_pretrained(UpperCamelCase , **UpperCamelCase )
lowerCAmelCase__ : int = tempfile.mkdtemp()
lowerCAmelCase__ : Tuple = tokenizer_r.save_pretrained(UpperCamelCase )
lowerCAmelCase__ : Optional[int] = tokenizer_p.save_pretrained(UpperCamelCase )
# Checks it save with the same files + the tokenizer.json file for the fast one
self.assertTrue(any("""tokenizer.json""" in f for f in tokenizer_r_files ) )
lowerCAmelCase__ : Dict = tuple(f for f in tokenizer_r_files if """tokenizer.json""" not in f )
self.assertSequenceEqual(UpperCamelCase , UpperCamelCase )
# Checks everything loads correctly in the same way
lowerCAmelCase__ : int = tokenizer_r.from_pretrained(UpperCamelCase )
lowerCAmelCase__ : Union[str, Any] = tokenizer_p.from_pretrained(UpperCamelCase )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(UpperCamelCase , UpperCamelCase ) )
shutil.rmtree(UpperCamelCase )
# Save tokenizer rust, legacy_format=True
lowerCAmelCase__ : List[str] = tempfile.mkdtemp()
lowerCAmelCase__ : Optional[Any] = tokenizer_r.save_pretrained(UpperCamelCase , legacy_format=UpperCamelCase )
lowerCAmelCase__ : List[str] = tokenizer_p.save_pretrained(UpperCamelCase )
# Checks it save with the same files
self.assertSequenceEqual(UpperCamelCase , UpperCamelCase )
# Checks everything loads correctly in the same way
lowerCAmelCase__ : List[str] = tokenizer_r.from_pretrained(UpperCamelCase )
lowerCAmelCase__ : Optional[Any] = tokenizer_p.from_pretrained(UpperCamelCase )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(UpperCamelCase , UpperCamelCase ) )
shutil.rmtree(UpperCamelCase )
# Save tokenizer rust, legacy_format=False
lowerCAmelCase__ : List[Any] = tempfile.mkdtemp()
lowerCAmelCase__ : int = tokenizer_r.save_pretrained(UpperCamelCase , legacy_format=UpperCamelCase )
lowerCAmelCase__ : str = tokenizer_p.save_pretrained(UpperCamelCase )
# Checks it saved the tokenizer.json file
self.assertTrue(any("""tokenizer.json""" in f for f in tokenizer_r_files ) )
# Checks everything loads correctly in the same way
lowerCAmelCase__ : Dict = tokenizer_r.from_pretrained(UpperCamelCase )
lowerCAmelCase__ : Optional[int] = tokenizer_p.from_pretrained(UpperCamelCase )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(UpperCamelCase , UpperCamelCase ) )
shutil.rmtree(UpperCamelCase )
@require_torch
def _lowerCAmelCase ( self : Tuple ) -> List[str]:
"""simple docstring"""
if not self.test_seqaseq:
return
lowerCAmelCase__ : int = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f"""{tokenizer.__class__.__name__}""" ):
# Longer text that will definitely require truncation.
lowerCAmelCase__ : Any = [
""" UN Chief Says There Is No Military Solution in Syria""",
""" Secretary-General Ban Ki-moon says his response to Russia's stepped up military support for"""
""" Syria is that 'there is no military solution' to the nearly five-year conflict and more weapons"""
""" will only worsen the violence and misery for millions of people.""",
]
lowerCAmelCase__ : Optional[int] = [
"""Şeful ONU declară că nu există o soluţie militară în Siria""",
"""Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al"""
""" Rusiei pentru Siria este că \"nu există o soluţie militară\" la conflictul de aproape cinci ani şi"""
""" că noi arme nu vor face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.""",
]
try:
lowerCAmelCase__ : Dict = tokenizer.prepare_seqaseq_batch(
src_texts=UpperCamelCase , tgt_texts=UpperCamelCase , max_length=3 , max_target_length=10 , return_tensors="""pt""" , src_lang="""eng_Latn""" , tgt_lang="""ron_Latn""" , )
except NotImplementedError:
return
self.assertEqual(batch.input_ids.shape[1] , 3 )
self.assertEqual(batch.labels.shape[1] , 10 )
# max_target_length will default to max_length if not specified
lowerCAmelCase__ : str = tokenizer.prepare_seqaseq_batch(
UpperCamelCase , tgt_texts=UpperCamelCase , max_length=3 , return_tensors="""pt""" )
self.assertEqual(batch.input_ids.shape[1] , 3 )
self.assertEqual(batch.labels.shape[1] , 3 )
lowerCAmelCase__ : int = tokenizer.prepare_seqaseq_batch(
src_texts=UpperCamelCase , max_length=3 , max_target_length=10 , return_tensors="""pt""" )
self.assertEqual(batch_encoder_only.input_ids.shape[1] , 3 )
self.assertEqual(batch_encoder_only.attention_mask.shape[1] , 3 )
self.assertNotIn("""decoder_input_ids""" , UpperCamelCase )
@unittest.skip("""Unfortunately way too slow to build a BPE with SentencePiece.""" )
def _lowerCAmelCase ( self : List[str] ) -> Dict:
"""simple docstring"""
pass
def _lowerCAmelCase ( self : str ) -> Union[str, Any]:
"""simple docstring"""
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
lowerCAmelCase__ : str = [AddedToken("""<special>""" , lstrip=UpperCamelCase )]
lowerCAmelCase__ : Optional[Any] = self.rust_tokenizer_class.from_pretrained(
UpperCamelCase , additional_special_tokens=UpperCamelCase , **UpperCamelCase )
lowerCAmelCase__ : Dict = tokenizer_r.encode("""Hey this is a <special> token""" )
lowerCAmelCase__ : Dict = tokenizer_r.encode("""<special>""" , add_special_tokens=UpperCamelCase )[0]
self.assertTrue(special_token_id in r_output )
if self.test_slow_tokenizer:
lowerCAmelCase__ : List[Any] = self.rust_tokenizer_class.from_pretrained(
UpperCamelCase , additional_special_tokens=UpperCamelCase , **UpperCamelCase , )
lowerCAmelCase__ : Dict = self.tokenizer_class.from_pretrained(
UpperCamelCase , additional_special_tokens=UpperCamelCase , **UpperCamelCase )
lowerCAmelCase__ : Optional[int] = tokenizer_p.encode("""Hey this is a <special> token""" )
lowerCAmelCase__ : Dict = tokenizer_cr.encode("""Hey this is a <special> token""" )
self.assertEqual(UpperCamelCase , UpperCamelCase )
self.assertEqual(UpperCamelCase , UpperCamelCase )
self.assertTrue(special_token_id in p_output )
self.assertTrue(special_token_id in cr_output )
@require_torch
@require_sentencepiece
@require_tokenizers
class _lowerCamelCase ( unittest.TestCase ):
_lowerCamelCase :int = "facebook/nllb-200-distilled-600M"
_lowerCamelCase :List[str] = [
" UN Chief Says There Is No Military Solution in Syria",
" Secretary-General Ban Ki-moon says his response to Russia's stepped up military support for Syria is that \"there is no military solution\" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.",
]
_lowerCamelCase :Optional[Any] = [
"Şeful ONU declară că nu există o soluţie militară în Siria",
"Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei"
" pentru Siria este că \"nu există o soluţie militară\" la conflictul de aproape cinci ani şi că noi arme nu vor"
" face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.",
]
_lowerCamelCase :Tuple = [
256047,
16297,
134408,
8165,
248066,
14734,
950,
1135,
105721,
3573,
83,
27352,
108,
49486,
2,
]
@classmethod
def _lowerCAmelCase ( cls : Optional[Any] ) -> Any:
"""simple docstring"""
lowerCAmelCase__ : NllbTokenizer = NllbTokenizer.from_pretrained(
cls.checkpoint_name , src_lang="""eng_Latn""" , tgt_lang="""ron_Latn""" )
lowerCAmelCase__ : Optional[Any] = 1
return cls
def _lowerCAmelCase ( self : Any ) -> int:
"""simple docstring"""
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""ace_Arab"""] , 25_60_01 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""ace_Latn"""] , 25_60_02 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""fra_Latn"""] , 25_60_57 )
def _lowerCAmelCase ( self : List[str] ) -> Optional[Any]:
"""simple docstring"""
lowerCAmelCase__ : Union[str, Any] = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
self.assertListEqual(self.expected_src_tokens , UpperCamelCase )
def _lowerCAmelCase ( self : Optional[Any] ) -> Optional[int]:
"""simple docstring"""
self.assertIn(UpperCamelCase , self.tokenizer.all_special_ids )
# fmt: off
lowerCAmelCase__ : str = [RO_CODE, 42_54, 9_80_68, 11_29_23, 3_90_72, 39_09, 7_13, 10_27_67, 26, 1_73_14, 3_56_42, 1_46_83, 3_31_18, 20_22, 6_69_87, 2, 25_60_47]
# fmt: on
lowerCAmelCase__ : Any = self.tokenizer.decode(UpperCamelCase , skip_special_tokens=UpperCamelCase )
lowerCAmelCase__ : Tuple = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=UpperCamelCase )
self.assertEqual(UpperCamelCase , UpperCamelCase )
self.assertNotIn(self.tokenizer.eos_token , UpperCamelCase )
def _lowerCAmelCase ( self : Optional[int] ) -> List[str]:
"""simple docstring"""
lowerCAmelCase__ : List[Any] = ["""this is gunna be a long sentence """ * 20]
assert isinstance(src_text[0] , UpperCamelCase )
lowerCAmelCase__ : int = 10
lowerCAmelCase__ : Any = self.tokenizer(UpperCamelCase , max_length=UpperCamelCase , truncation=UpperCamelCase ).input_ids[0]
self.assertEqual(ids[-1] , 2 )
self.assertEqual(ids[0] , UpperCamelCase )
self.assertEqual(len(UpperCamelCase ) , UpperCamelCase )
def _lowerCAmelCase ( self : Any ) -> Any:
"""simple docstring"""
self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["""<mask>""", """ar_AR"""] ) , [25_62_03, 3] )
def _lowerCAmelCase ( self : Optional[Any] ) -> Optional[Any]:
"""simple docstring"""
lowerCAmelCase__ : List[Any] = tempfile.mkdtemp()
lowerCAmelCase__ : int = self.tokenizer.fairseq_tokens_to_ids
self.tokenizer.save_pretrained(UpperCamelCase )
lowerCAmelCase__ : Union[str, Any] = NllbTokenizer.from_pretrained(UpperCamelCase )
self.assertDictEqual(new_tok.fairseq_tokens_to_ids , UpperCamelCase )
@require_torch
def _lowerCAmelCase ( self : int ) -> List[str]:
"""simple docstring"""
lowerCAmelCase__ : Optional[int] = self.tokenizer(
self.src_text , text_target=self.tgt_text , padding=UpperCamelCase , truncation=UpperCamelCase , max_length=len(self.expected_src_tokens ) , return_tensors="""pt""" , )
lowerCAmelCase__ : int = shift_tokens_right(
batch["""labels"""] , self.tokenizer.pad_token_id , self.tokenizer.lang_code_to_id["""ron_Latn"""] )
self.assertIsInstance(UpperCamelCase , UpperCamelCase )
self.assertEqual((2, 15) , batch.input_ids.shape )
self.assertEqual((2, 15) , batch.attention_mask.shape )
lowerCAmelCase__ : Dict = batch.input_ids.tolist()[0]
self.assertListEqual(self.expected_src_tokens , UpperCamelCase )
self.assertEqual(UpperCamelCase , batch.decoder_input_ids[0, 0] ) # EOS
# Test that special tokens are reset
self.assertEqual(self.tokenizer.prefix_tokens , [EN_CODE] )
self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
def _lowerCAmelCase ( self : Optional[int] ) -> int:
"""simple docstring"""
lowerCAmelCase__ : str = self.tokenizer(self.src_text , padding=UpperCamelCase , truncation=UpperCamelCase , max_length=3 , return_tensors="""pt""" )
lowerCAmelCase__ : Any = self.tokenizer(
text_target=self.tgt_text , padding=UpperCamelCase , truncation=UpperCamelCase , max_length=10 , return_tensors="""pt""" )
lowerCAmelCase__ : str = targets["""input_ids"""]
lowerCAmelCase__ : Any = shift_tokens_right(
UpperCamelCase , self.tokenizer.pad_token_id , decoder_start_token_id=self.tokenizer.lang_code_to_id[self.tokenizer.tgt_lang] , )
self.assertEqual(batch.input_ids.shape[1] , 3 )
self.assertEqual(batch.decoder_input_ids.shape[1] , 10 )
@require_torch
def _lowerCAmelCase ( self : Dict ) -> Optional[Any]:
"""simple docstring"""
lowerCAmelCase__ : Any = self.tokenizer._build_translation_inputs(
"""A test""" , return_tensors="""pt""" , src_lang="""eng_Latn""" , tgt_lang="""fra_Latn""" )
self.assertEqual(
nested_simplify(UpperCamelCase ) , {
# A, test, EOS, en_XX
"""input_ids""": [[25_60_47, 70, 73_56, 2]],
"""attention_mask""": [[1, 1, 1, 1]],
# ar_AR
"""forced_bos_token_id""": 25_60_57,
} , )
@require_torch
def _lowerCAmelCase ( self : Dict ) -> Tuple:
"""simple docstring"""
lowerCAmelCase__ : Union[str, Any] = True
lowerCAmelCase__ : str = self.tokenizer(
"""UN Chief says there is no military solution in Syria""" , src_lang="""eng_Latn""" , tgt_lang="""fra_Latn""" )
self.assertEqual(
inputs.input_ids , [1_62_97, 13_44_08, 2_56_53, 63_70, 2_48, 2_54, 10_39_29, 9_49_95, 1_08, 4_94_86, 2, 25_60_47] )
lowerCAmelCase__ : List[str] = False
lowerCAmelCase__ : Union[str, Any] = self.tokenizer(
"""UN Chief says there is no military solution in Syria""" , src_lang="""eng_Latn""" , tgt_lang="""fra_Latn""" )
self.assertEqual(
inputs.input_ids , [25_60_47, 1_62_97, 13_44_08, 2_56_53, 63_70, 2_48, 2_54, 10_39_29, 9_49_95, 1_08, 4_94_86, 2] )
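# Illustrative usage (a sketch of the public API exercised above; `src_texts`
# and `tgt_texts` are hypothetical lists of strings):
#   tok = NllbTokenizer.from_pretrained("facebook/nllb-200-distilled-600M",
#                                       src_lang="eng_Latn", tgt_lang="ron_Latn")
#   batch = tok(src_texts, text_target=tgt_texts, return_tensors="pt")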
| 212
| 1
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
__UpperCamelCase : Optional[Any] = {"configuration_xglm": ["XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "XGLMConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : Union[str, Any] = ["XGLMTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : Union[str, Any] = ["XGLMTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_xglm"] = [
"XGLM_PRETRAINED_MODEL_ARCHIVE_LIST",
"XGLMForCausalLM",
"XGLMModel",
"XGLMPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_flax_xglm"] = [
"FlaxXGLMForCausalLM",
"FlaxXGLMModel",
"FlaxXGLMPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_tf_xglm"] = [
"TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFXGLMForCausalLM",
"TFXGLMModel",
"TFXGLMPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_xglm import XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XGLMConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm import XGLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm_fast import XGLMTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xglm import XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, XGLMForCausalLM, XGLMModel, XGLMPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_xglm import FlaxXGLMForCausalLM, FlaxXGLMModel, FlaxXGLMPreTrainedModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
TFXGLMPreTrainedModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 146
|
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowercase_ = logging.get_logger(__name__)
lowercase_ = {
"facebook/xmod-base": "https://huggingface.co/facebook/xmod-base/resolve/main/config.json",
"facebook/xmod-large-prenorm": "https://huggingface.co/facebook/xmod-large-prenorm/resolve/main/config.json",
"facebook/xmod-base-13-125k": "https://huggingface.co/facebook/xmod-base-13-125k/resolve/main/config.json",
"facebook/xmod-base-30-125k": "https://huggingface.co/facebook/xmod-base-30-125k/resolve/main/config.json",
"facebook/xmod-base-30-195k": "https://huggingface.co/facebook/xmod-base-30-195k/resolve/main/config.json",
"facebook/xmod-base-60-125k": "https://huggingface.co/facebook/xmod-base-60-125k/resolve/main/config.json",
"facebook/xmod-base-60-265k": "https://huggingface.co/facebook/xmod-base-60-265k/resolve/main/config.json",
"facebook/xmod-base-75-125k": "https://huggingface.co/facebook/xmod-base-75-125k/resolve/main/config.json",
"facebook/xmod-base-75-269k": "https://huggingface.co/facebook/xmod-base-75-269k/resolve/main/config.json",
}
class __A ( A ):
'''simple docstring'''
__lowerCamelCase : Any = 'xmod'
def __init__(self , A=30_522 , A=768 , A=12 , A=12 , A=3_072 , A="gelu" , A=0.1 , A=0.1 , A=512 , A=2 , A=0.02 , A=1E-12 , A=1 , A=0 , A=2 , A="absolute" , A=True , A=None , A=False , A=2 , A=False , A=True , A=True , A=("en_XX",) , A=None , **A , ) -> List[str]:
"""simple docstring"""
super().__init__(pad_token_id=A , bos_token_id=A , eos_token_id=A , **A )
_a = vocab_size
_a = hidden_size
_a = num_hidden_layers
_a = num_attention_heads
_a = hidden_act
_a = intermediate_size
_a = hidden_dropout_prob
_a = attention_probs_dropout_prob
_a = max_position_embeddings
_a = type_vocab_size
_a = initializer_range
_a = layer_norm_eps
_a = position_embedding_type
_a = use_cache
_a = classifier_dropout
_a = pre_norm
_a = adapter_reduction_factor
_a = adapter_layer_norm
_a = adapter_reuse_layer_norm
_a = ln_before_adapter
_a = list(A )
_a = default_language
class __A ( A ):
'''simple docstring'''
@property
def a__ (self ) -> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
if self.task == "multiple-choice":
_a = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
else:
_a = {0: '''batch''', 1: '''sequence'''}
return OrderedDict(
[
('''input_ids''', dynamic_axis),
('''attention_mask''', dynamic_axis),
] )
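# The dynamic-axes mapping above tells the ONNX exporter which input
# dimensions stay variable: batch and sequence length for standard tasks,
# plus a choice axis for multiple-choice heads.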
| 211
| 0
|
"""simple docstring"""
from __future__ import annotations
from typing import Any
class A_ :
"""simple docstring"""
def __init__( self :Optional[int] , lowercase_ :int ) -> None:
UpperCAmelCase = num_of_nodes
UpperCAmelCase = []
UpperCAmelCase = {}
def UpperCAmelCase__ ( self :List[str] , lowercase_ :int , lowercase_ :int , lowercase_ :int ) -> None:
self.m_edges.append([u_node, v_node, weight] )
def UpperCAmelCase__ ( self :Any , lowercase_ :int ) -> int:
if self.m_component[u_node] == u_node:
return u_node
return self.find_component(self.m_component[u_node] )
def UpperCAmelCase__ ( self :Union[str, Any] , lowercase_ :int ) -> None:
if self.m_component[u_node] != u_node:
for k in self.m_component:
UpperCAmelCase = self.find_component(lowercase_ )
def UpperCAmelCase__ ( self :Optional[int] , lowercase_ :list[int] , lowercase_ :int , lowercase_ :int ) -> None:
if component_size[u_node] <= component_size[v_node]:
UpperCAmelCase = v_node
component_size[v_node] += component_size[u_node]
self.set_component(lowercase_ )
elif component_size[u_node] >= component_size[v_node]:
UpperCAmelCase = self.find_component(lowercase_ )
component_size[u_node] += component_size[v_node]
self.set_component(lowercase_ )
def UpperCAmelCase__ ( self :Optional[int] ) -> None:
UpperCAmelCase = []
UpperCAmelCase = 0
UpperCAmelCase = [-1] * self.m_num_of_nodes
# A list of components (initialized to all of the nodes)
for node in range(self.m_num_of_nodes ):
self.m_component.update({node: node} )
component_size.append(1 )
UpperCAmelCase = self.m_num_of_nodes
while num_of_components > 1:
for edge in self.m_edges:
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = edge
UpperCAmelCase = self.m_component[u]
UpperCAmelCase = self.m_component[v]
if u_component != v_component:
for component in (u_component, v_component):
if (
minimum_weight_edge[component] == -1
or minimum_weight_edge[component][2] > w
):
UpperCAmelCase = [u, v, w]
for edge in minimum_weight_edge:
if isinstance(lowercase_ , lowercase_ ):
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = edge
UpperCAmelCase = self.m_component[u]
UpperCAmelCase = self.m_component[v]
if u_component != v_component:
mst_weight += w
self.union(lowercase_ , lowercase_ , lowercase_ )
print(f"""Added edge [{u} - {v}]\nAdded weight: {w}\n""" )
num_of_components -= 1
UpperCAmelCase = [-1] * self.m_num_of_nodes
print(f"""The total weight of the minimal spanning tree is: {mst_weight}""" )
def _lowerCAmelCase ( ):
pass
if __name__ == "__main__":
import doctest
doctest.testmod()
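# Worked example (hedged, traced by hand rather than via the API): for a
# 3-node graph with edges (0-1, w=5), (1-2, w=3) and (0-2, w=4), each
# component's cheapest outgoing edge is kept, so Boruvka adds 1-2 and 0-2
# and the printed MST weight is 3 + 4 = 7.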
| 181
|
"""simple docstring"""
import contextlib
import copy
import random
from typing import Any, Dict, Iterable, Optional, Union
import numpy as np
import torch
from .utils import deprecate, is_transformers_available
if is_transformers_available():
import transformers
def _lowerCAmelCase ( lowercase_ ):
random.seed(lowercase_ )
np.random.seed(lowercase_ )
torch.manual_seed(lowercase_ )
torch.cuda.manual_seed_all(lowercase_ )
# ^^ safe to call this function even if cuda is not available
class A_ :
"""simple docstring"""
def __init__( self :Any , lowercase_ :Iterable[torch.nn.Parameter] , lowercase_ :float = 0.9999 , lowercase_ :float = 0.0 , lowercase_ :int = 0 , lowercase_ :bool = False , lowercase_ :Union[float, int] = 1.0 , lowercase_ :Union[float, int] = 2 / 3 , lowercase_ :Optional[Any] = None , lowercase_ :Dict[str, Any] = None , **lowercase_ :Dict , ) -> Optional[int]:
if isinstance(lowercase_ , torch.nn.Module ):
UpperCAmelCase = (
'Passing a `torch.nn.Module` to `ExponentialMovingAverage` is deprecated. '
'Please pass the parameters of the module instead.'
)
deprecate(
'passing a `torch.nn.Module` to `ExponentialMovingAverage`' , '1.0.0' , lowercase_ , standard_warn=lowercase_ , )
UpperCAmelCase = parameters.parameters()
# set use_ema_warmup to True if a torch.nn.Module is passed for backwards compatibility
UpperCAmelCase = True
if kwargs.get('max_value' , lowercase_ ) is not None:
UpperCAmelCase = 'The `max_value` argument is deprecated. Please use `decay` instead.'
deprecate('max_value' , '1.0.0' , lowercase_ , standard_warn=lowercase_ )
UpperCAmelCase = kwargs['max_value']
if kwargs.get('min_value' , lowercase_ ) is not None:
UpperCAmelCase = 'The `min_value` argument is deprecated. Please use `min_decay` instead.'
deprecate('min_value' , '1.0.0' , lowercase_ , standard_warn=lowercase_ )
UpperCAmelCase = kwargs['min_value']
UpperCAmelCase = list(lowercase_ )
UpperCAmelCase = [p.clone().detach() for p in parameters]
if kwargs.get('device' , lowercase_ ) is not None:
UpperCAmelCase = 'The `device` argument is deprecated. Please use `to` instead.'
deprecate('device' , '1.0.0' , lowercase_ , standard_warn=lowercase_ )
self.to(device=kwargs['device'] )
UpperCAmelCase = None
UpperCAmelCase = decay
UpperCAmelCase = min_decay
UpperCAmelCase = update_after_step
UpperCAmelCase = use_ema_warmup
UpperCAmelCase = inv_gamma
UpperCAmelCase = power
UpperCAmelCase = 0
UpperCAmelCase = None # set in `step()`
UpperCAmelCase = model_cls
UpperCAmelCase = model_config
@classmethod
def UpperCAmelCase__ ( cls :int , lowercase_ :Union[str, Any] , lowercase_ :Any ) -> "EMAModel":
UpperCAmelCase , UpperCAmelCase = model_cls.load_config(lowercase_ , return_unused_kwargs=lowercase_ )
UpperCAmelCase = model_cls.from_pretrained(lowercase_ )
UpperCAmelCase = cls(model.parameters() , model_cls=lowercase_ , model_config=model.config )
ema_model.load_state_dict(lowercase_ )
return ema_model
def UpperCAmelCase__ ( self :List[Any] , lowercase_ :List[str] ) -> int:
if self.model_cls is None:
raise ValueError('`save_pretrained` can only be used if `model_cls` was defined at __init__.' )
if self.model_config is None:
raise ValueError('`save_pretrained` can only be used if `model_config` was defined at __init__.' )
UpperCAmelCase = self.model_cls.from_config(self.model_config )
UpperCAmelCase = self.state_dict()
state_dict.pop('shadow_params' , lowercase_ )
model.register_to_config(**lowercase_ )
self.copy_to(model.parameters() )
model.save_pretrained(lowercase_ )
def UpperCAmelCase__ ( self :Optional[int] , lowercase_ :int ) -> float:
UpperCAmelCase = max(0 , optimization_step - self.update_after_step - 1 )
if step <= 0:
return 0.0
if self.use_ema_warmup:
UpperCAmelCase = 1 - (1 + step / self.inv_gamma) ** -self.power
else:
UpperCAmelCase = (1 + step) / (10 + step)
UpperCAmelCase = min(lowercase_ , self.decay )
# make sure decay is not smaller than min_decay
UpperCAmelCase = max(lowercase_ , self.min_decay )
return cur_decay_value
@torch.no_grad()
def UpperCAmelCase__ ( self :List[Any] , lowercase_ :Iterable[torch.nn.Parameter] ) -> Optional[int]:
if isinstance(lowercase_ , torch.nn.Module ):
UpperCAmelCase = (
'Passing a `torch.nn.Module` to `ExponentialMovingAverage.step` is deprecated. '
'Please pass the parameters of the module instead.'
)
deprecate(
'passing a `torch.nn.Module` to `ExponentialMovingAverage.step`' , '1.0.0' , lowercase_ , standard_warn=lowercase_ , )
UpperCAmelCase = parameters.parameters()
UpperCAmelCase = list(lowercase_ )
self.optimization_step += 1
# Compute the decay factor for the exponential moving average.
UpperCAmelCase = self.get_decay(self.optimization_step )
UpperCAmelCase = decay
UpperCAmelCase = 1 - decay
UpperCAmelCase = contextlib.nullcontext
if is_transformers_available() and transformers.deepspeed.is_deepspeed_zeroa_enabled():
import deepspeed
for s_param, param in zip(self.shadow_params , lowercase_ ):
if is_transformers_available() and transformers.deepspeed.is_deepspeed_zeroa_enabled():
UpperCAmelCase = deepspeed.zero.GatheredParameters(lowercase_ , modifier_rank=lowercase_ )
with context_manager():
if param.requires_grad:
s_param.sub_(one_minus_decay * (s_param - param) )
else:
s_param.copy_(lowercase_ )
def UpperCAmelCase__ ( self :Tuple , lowercase_ :Iterable[torch.nn.Parameter] ) -> None:
UpperCAmelCase = list(lowercase_ )
for s_param, param in zip(self.shadow_params , lowercase_ ):
param.data.copy_(s_param.to(param.device ).data )
def UpperCAmelCase__ ( self :Dict , lowercase_ :Tuple=None , lowercase_ :Union[str, Any]=None ) -> None:
UpperCAmelCase = [
p.to(device=lowercase_ , dtype=lowercase_ ) if p.is_floating_point() else p.to(device=lowercase_ )
for p in self.shadow_params
]
def UpperCAmelCase__ ( self :Union[str, Any] ) -> dict:
return {
"decay": self.decay,
"min_decay": self.min_decay,
"optimization_step": self.optimization_step,
"update_after_step": self.update_after_step,
"use_ema_warmup": self.use_ema_warmup,
"inv_gamma": self.inv_gamma,
"power": self.power,
"shadow_params": self.shadow_params,
}
def UpperCAmelCase__ ( self :Optional[int] , lowercase_ :Iterable[torch.nn.Parameter] ) -> None:
UpperCAmelCase = [param.detach().cpu().clone() for param in parameters]
def UpperCAmelCase__ ( self :Optional[Any] , lowercase_ :Iterable[torch.nn.Parameter] ) -> None:
if self.temp_stored_params is None:
raise RuntimeError('This ExponentialMovingAverage has no `store()`ed weights ' 'to `restore()`' )
for c_param, param in zip(self.temp_stored_params , lowercase_ ):
param.data.copy_(c_param.data )
# Better memory-wise.
UpperCAmelCase = None
def UpperCAmelCase__ ( self :Union[str, Any] , lowercase_ :dict ) -> None:
UpperCAmelCase = copy.deepcopy(lowercase_ )
UpperCAmelCase = state_dict.get('decay' , self.decay )
if self.decay < 0.0 or self.decay > 1.0:
raise ValueError('Decay must be between 0 and 1' )
UpperCAmelCase = state_dict.get('min_decay' , self.min_decay )
if not isinstance(self.min_decay , lowercase_ ):
raise ValueError('Invalid min_decay' )
UpperCAmelCase = state_dict.get('optimization_step' , self.optimization_step )
if not isinstance(self.optimization_step , lowercase_ ):
raise ValueError('Invalid optimization_step' )
UpperCAmelCase = state_dict.get('update_after_step' , self.update_after_step )
if not isinstance(self.update_after_step , lowercase_ ):
raise ValueError('Invalid update_after_step' )
UpperCAmelCase = state_dict.get('use_ema_warmup' , self.use_ema_warmup )
if not isinstance(self.use_ema_warmup , lowercase_ ):
raise ValueError('Invalid use_ema_warmup' )
UpperCAmelCase = state_dict.get('inv_gamma' , self.inv_gamma )
if not isinstance(self.inv_gamma , (float, int) ):
raise ValueError('Invalid inv_gamma' )
UpperCAmelCase = state_dict.get('power' , self.power )
if not isinstance(self.power , (float, int) ):
raise ValueError('Invalid power' )
UpperCAmelCase = state_dict.get('shadow_params' , lowercase_ )
if shadow_params is not None:
UpperCAmelCase = shadow_params
if not isinstance(self.shadow_params , lowercase_ ):
raise ValueError('shadow_params must be a list' )
if not all(isinstance(lowercase_ , torch.Tensor ) for p in self.shadow_params ):
raise ValueError('shadow_params must all be Tensors' )
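

# Minimal usage sketch (added; a hypothetical toy model, not part of the original module):
def _demo_ema() -> None:
    net = torch.nn.Linear(4, 2)
    ema = EMAModel(net.parameters(), decay=0.999)
    opt = torch.optim.SGD(net.parameters(), lr=0.1)
    for _ in range(10):
        loss = net(torch.randn(8, 4)).pow(2).mean()
        loss.backward()
        opt.step()
        opt.zero_grad()
        ema.step(net.parameters())  # update the shadow weights after each optimizer step
    ema.copy_to(net.parameters())  # swap the averaged weights in for evaluation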
"""simple docstring"""
from __future__ import annotations
def lowercase ( lowerCAmelCase__ : int ) -> bool:
__a = str(__UpperCAmelCase )
return len(__UpperCAmelCase ) == 9 and set(__UpperCAmelCase ) == set('''123456789''' )
def lowercase ( ) -> int | None:
for base_num in range(9999 , 4999 , -1 ):
__a = 100002 * base_num
if is_9_pandigital(__UpperCAmelCase ):
return candidate
for base_num in range(333 , 99 , -1 ):
__a = 1002003 * base_num
if is_9_pandigital(__UpperCAmelCase ):
return candidate
return None
if __name__ == "__main__":
print(F'''{solution() = }''')
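
# A brief sanity note (added, not in the original file): for a four-digit base n,
# concatenating n with 2n gives n * 10**5 + 2 * n == 100002 * n; for a three-digit
# base, concatenating n, 2n and 3n gives n * 10**6 + 2 * n * 10**3 + 3 * n == 1002003 * n.
assert 100002 * 9327 == int(str(9327) + str(2 * 9327))  # "9327" + "18654" -> 932718654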
import json
import os
import pickle
import shutil
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
from datasets import Dataset
from transformers import is_faiss_available
from transformers.models.bart.configuration_bart import BartConfig
from transformers.models.bart.tokenization_bart import BartTokenizer
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES
from transformers.models.dpr.configuration_dpr import DPRConfig
from transformers.models.dpr.tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer
from transformers.models.rag.configuration_rag import RagConfig
from transformers.models.rag.retrieval_rag import CustomHFIndex, RagRetriever
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES
from transformers.testing_utils import require_faiss, require_sentencepiece, require_tokenizers, require_torch
if is_faiss_available():
import faiss
@require_faiss
class RagRetrieverTest(TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()
        self.retrieval_vector_size = 8

        # DPR tok
        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "[PAD]",
            "[MASK]",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        dpr_tokenizer_path = os.path.join(self.tmpdirname, "dpr_tokenizer")
        os.makedirs(dpr_tokenizer_path, exist_ok=True)
        self.vocab_file = os.path.join(dpr_tokenizer_path, DPR_VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

        # BART tok
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "\u0120",
            "\u0120l",
            "\u0120n",
            "\u0120lo",
            "\u0120low",
            "er",
            "\u0120lowest",
            "\u0120newer",
            "\u0120wider",
            "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}
        bart_tokenizer_path = os.path.join(self.tmpdirname, "bart_tokenizer")
        os.makedirs(bart_tokenizer_path, exist_ok=True)
        self.vocab_file = os.path.join(bart_tokenizer_path, BART_VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(bart_tokenizer_path, BART_VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_dpr_tokenizer(self) -> DPRQuestionEncoderTokenizer:
        return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname, "dpr_tokenizer"))

    def get_dpr_ctx_encoder_tokenizer(self) -> DPRContextEncoderTokenizer:
        return DPRContextEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname, "dpr_tokenizer"))

    def get_bart_tokenizer(self) -> BartTokenizer:
        return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname, "bart_tokenizer"))

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def get_dummy_dataset(self):
        dataset = Dataset.from_dict(
            {
                "id": ["0", "1"],
                "text": ["foo", "bar"],
                "title": ["Foo", "Bar"],
                "embeddings": [np.ones(self.retrieval_vector_size), 2 * np.ones(self.retrieval_vector_size)],
            }
        )
        dataset.add_faiss_index("embeddings", string_factory="Flat", metric_type=faiss.METRIC_INNER_PRODUCT)
        return dataset

    def get_dummy_canonical_hf_index_retriever(self):
        dataset = self.get_dummy_dataset()
        config = RagConfig(
            retrieval_vector_size=self.retrieval_vector_size,
            question_encoder=DPRConfig().to_dict(),
            generator=BartConfig().to_dict(),
        )
        with patch("transformers.models.rag.retrieval_rag.load_dataset") as mock_load_dataset:
            mock_load_dataset.return_value = dataset
            retriever = RagRetriever(
                config,
                question_encoder_tokenizer=self.get_dpr_tokenizer(),
                generator_tokenizer=self.get_bart_tokenizer(),
            )
        return retriever

    def get_dummy_custom_hf_index_retriever(self, from_disk: bool):
        dataset = self.get_dummy_dataset()
        config = RagConfig(
            retrieval_vector_size=self.retrieval_vector_size,
            question_encoder=DPRConfig().to_dict(),
            generator=BartConfig().to_dict(),
            index_name="custom",
        )
        if from_disk:
            config.passages_path = os.path.join(self.tmpdirname, "dataset")
            config.index_path = os.path.join(self.tmpdirname, "index.faiss")
            dataset.get_index("embeddings").save(os.path.join(self.tmpdirname, "index.faiss"))
            dataset.drop_index("embeddings")
            dataset.save_to_disk(os.path.join(self.tmpdirname, "dataset"))
            del dataset
            retriever = RagRetriever(
                config,
                question_encoder_tokenizer=self.get_dpr_tokenizer(),
                generator_tokenizer=self.get_bart_tokenizer(),
            )
        else:
            retriever = RagRetriever(
                config,
                question_encoder_tokenizer=self.get_dpr_tokenizer(),
                generator_tokenizer=self.get_bart_tokenizer(),
                index=CustomHFIndex(config.retrieval_vector_size, dataset),
            )
        return retriever

    def get_dummy_legacy_index_retriever(self):
        dataset = Dataset.from_dict(
            {
                "id": ["0", "1"],
                "text": ["foo", "bar"],
                "title": ["Foo", "Bar"],
                "embeddings": [np.ones(self.retrieval_vector_size + 1), 2 * np.ones(self.retrieval_vector_size + 1)],
            }
        )
        dataset.add_faiss_index("embeddings", string_factory="Flat", metric_type=faiss.METRIC_INNER_PRODUCT)

        index_file_name = os.path.join(self.tmpdirname, "hf_bert_base.hnswSQ8_correct_phi_128.c_index")
        dataset.save_faiss_index("embeddings", index_file_name + ".index.dpr")
        pickle.dump(dataset["id"], open(index_file_name + ".index_meta.dpr", "wb"))

        passages_file_name = os.path.join(self.tmpdirname, "psgs_w100.tsv.pkl")
        passages = {sample["id"]: [sample["text"], sample["title"]] for sample in dataset}
        pickle.dump(passages, open(passages_file_name, "wb"))

        config = RagConfig(
            retrieval_vector_size=self.retrieval_vector_size,
            question_encoder=DPRConfig().to_dict(),
            generator=BartConfig().to_dict(),
            index_name="legacy",
            index_path=self.tmpdirname,
        )
        retriever = RagRetriever(
            config, question_encoder_tokenizer=self.get_dpr_tokenizer(), generator_tokenizer=self.get_bart_tokenizer()
        )
        return retriever

    def test_canonical_hf_index_retriever_retrieve(self):
        n_docs = 1
        retriever = self.get_dummy_canonical_hf_index_retriever()
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
        )
        retrieved_doc_embeds, doc_ids, doc_dicts = retriever.retrieve(hidden_states, n_docs=n_docs)
        self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size))
        self.assertEqual(len(doc_dicts), 2)
        self.assertEqual(sorted(doc_dicts[0]), ["embeddings", "id", "text", "title"])
        self.assertEqual(len(doc_dicts[0]["id"]), n_docs)
        self.assertEqual(doc_dicts[0]["id"][0], "1")  # max inner product is reached with second doc
        self.assertEqual(doc_dicts[1]["id"][0], "0")  # max inner product is reached with first doc
        self.assertListEqual(doc_ids.tolist(), [[1], [0]])

    def test_canonical_hf_index_retriever_save_and_from_pretrained(self):
        retriever = self.get_dummy_canonical_hf_index_retriever()
        with tempfile.TemporaryDirectory() as tmp_dirname:
            with patch("transformers.models.rag.retrieval_rag.load_dataset") as mock_load_dataset:
                mock_load_dataset.return_value = self.get_dummy_dataset()
                retriever.save_pretrained(tmp_dirname)
                retriever = RagRetriever.from_pretrained(tmp_dirname)
        self.assertIsInstance(retriever, RagRetriever)
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
        )
        out = retriever.retrieve(hidden_states, n_docs=1)
        self.assertTrue(out is not None)

    def test_custom_hf_index_retriever_retrieve(self):
        n_docs = 1
        retriever = self.get_dummy_custom_hf_index_retriever(from_disk=False)
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
        )
        retrieved_doc_embeds, doc_ids, doc_dicts = retriever.retrieve(hidden_states, n_docs=n_docs)
        self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size))
        self.assertEqual(len(doc_dicts), 2)
        self.assertEqual(sorted(doc_dicts[0]), ["embeddings", "id", "text", "title"])
        self.assertEqual(len(doc_dicts[0]["id"]), n_docs)
        self.assertEqual(doc_dicts[0]["id"][0], "1")  # max inner product is reached with second doc
        self.assertEqual(doc_dicts[1]["id"][0], "0")  # max inner product is reached with first doc
        self.assertListEqual(doc_ids.tolist(), [[1], [0]])

    def test_custom_hf_index_retriever_save_and_from_pretrained(self):
        retriever = self.get_dummy_custom_hf_index_retriever(from_disk=False)
        with tempfile.TemporaryDirectory() as tmp_dirname:
            retriever.save_pretrained(tmp_dirname)
            retriever = RagRetriever.from_pretrained(tmp_dirname)
            self.assertIsInstance(retriever, RagRetriever)
            hidden_states = np.array(
                [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
            )
            out = retriever.retrieve(hidden_states, n_docs=1)
            self.assertTrue(out is not None)

    def test_custom_hf_index_retriever_retrieve_from_disk(self):
        n_docs = 1
        retriever = self.get_dummy_custom_hf_index_retriever(from_disk=True)
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
        )
        retrieved_doc_embeds, doc_ids, doc_dicts = retriever.retrieve(hidden_states, n_docs=n_docs)
        self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size))
        self.assertEqual(len(doc_dicts), 2)
        self.assertEqual(sorted(doc_dicts[0]), ["embeddings", "id", "text", "title"])
        self.assertEqual(len(doc_dicts[0]["id"]), n_docs)
        self.assertEqual(doc_dicts[0]["id"][0], "1")  # max inner product is reached with second doc
        self.assertEqual(doc_dicts[1]["id"][0], "0")  # max inner product is reached with first doc
        self.assertListEqual(doc_ids.tolist(), [[1], [0]])

    def test_custom_hf_index_retriever_save_and_from_pretrained_from_disk(self):
        retriever = self.get_dummy_custom_hf_index_retriever(from_disk=True)
        with tempfile.TemporaryDirectory() as tmp_dirname:
            retriever.save_pretrained(tmp_dirname)
            retriever = RagRetriever.from_pretrained(tmp_dirname)
            self.assertIsInstance(retriever, RagRetriever)
            hidden_states = np.array(
                [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
            )
            out = retriever.retrieve(hidden_states, n_docs=1)
            self.assertTrue(out is not None)

    def test_legacy_index_retriever_retrieve(self):
        n_docs = 1
        retriever = self.get_dummy_legacy_index_retriever()
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
        )
        retrieved_doc_embeds, doc_ids, doc_dicts = retriever.retrieve(hidden_states, n_docs=n_docs)
        self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size))
        self.assertEqual(len(doc_dicts), 2)
        self.assertEqual(sorted(doc_dicts[0]), ["text", "title"])
        self.assertEqual(len(doc_dicts[0]["text"]), n_docs)
        self.assertEqual(doc_dicts[0]["text"][0], "bar")  # max inner product is reached with second doc
        self.assertEqual(doc_dicts[1]["text"][0], "foo")  # max inner product is reached with first doc
        self.assertListEqual(doc_ids.tolist(), [[1], [0]])

    def test_legacy_hf_index_retriever_save_and_from_pretrained(self):
        retriever = self.get_dummy_legacy_index_retriever()
        with tempfile.TemporaryDirectory() as tmp_dirname:
            retriever.save_pretrained(tmp_dirname)
            retriever = RagRetriever.from_pretrained(tmp_dirname)
            self.assertIsInstance(retriever, RagRetriever)
            hidden_states = np.array(
                [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
            )
            out = retriever.retrieve(hidden_states, n_docs=1)
            self.assertTrue(out is not None)

    @require_torch
    @require_tokenizers
    @require_sentencepiece
    def test_hf_index_retriever_call(self):
        import torch

        n_docs = 1
        retriever = self.get_dummy_canonical_hf_index_retriever()
        question_input_ids = [[5, 7], [10, 11]]
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
        )
        out = retriever(question_input_ids, hidden_states, prefix=retriever.config.generator.prefix, n_docs=n_docs)
        context_input_ids, context_attention_mask, retrieved_doc_embeds = (
            out["context_input_ids"],
            out["context_attention_mask"],
            out["retrieved_doc_embeds"],
        )
        self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size))
        self.assertIsInstance(context_input_ids, list)
        self.assertIsInstance(context_attention_mask, list)
        self.assertIsInstance(retrieved_doc_embeds, np.ndarray)

        out = retriever(
            question_input_ids,
            hidden_states,
            prefix=retriever.config.generator.prefix,
            n_docs=n_docs,
            return_tensors="pt",
        )
        context_input_ids, context_attention_mask, retrieved_doc_embeds, doc_ids = (  # noqa: F841
            out["context_input_ids"],
            out["context_attention_mask"],
            out["retrieved_doc_embeds"],
            out["doc_ids"],
        )
        self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size))
        self.assertIsInstance(context_input_ids, torch.Tensor)
        self.assertIsInstance(context_attention_mask, torch.Tensor)
        self.assertIsInstance(retrieved_doc_embeds, torch.Tensor)

    @require_torch
    @require_tokenizers
    @require_sentencepiece
    def test_custom_hf_index_end_to_end_retrieval(self):
        context_encoder_tokenizer = self.get_dpr_ctx_encoder_tokenizer()
        n_docs = 1
        retriever = self.get_dummy_custom_hf_index_retriever(from_disk=False)
        retriever.set_ctx_encoder_tokenizer(context_encoder_tokenizer)

        question_input_ids = [[5, 7], [10, 11]]
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
        )
        out = retriever(question_input_ids, hidden_states, prefix=retriever.config.generator.prefix, n_docs=n_docs)

        self.assertEqual(
            len(out), 6
        )  # check whether the retriever output consists of 6 attributes including tokenized docs
        self.assertEqual(
            all(k in out for k in ("tokenized_doc_ids", "tokenized_doc_attention_mask")), True
        )  # check for doc token related keys in dictionary.
def equation(x: float) -> float:
    return 10 - x * x


def bisection(a: float, b: float) -> float:
    # Bolzano's theorem: a sign change over [a, b] guarantees a root in between.
    if equation(a) * equation(b) >= 0:
        raise ValueError("Wrong space!")

    c = a
    while (b - a) >= 0.01:
        # Find middle point
        c = (a + b) / 2
        # Check if middle point is root
        if equation(c) == 0.0:
            break
        # Decide the side to repeat the steps
        if equation(c) * equation(a) < 0:
            b = c
        else:
            a = c
    return c


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(bisection(-2, 5))
    print(bisection(0, 6))
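

# Quick sanity check (added for illustration): the roots of 10 - x**2 are ±sqrt(10)
# ≈ ±3.1623, so the result must lie within the 0.01 bracketing tolerance of the root.
def _check_bisection() -> None:
    import math

    root = bisection(0, 6)
    assert abs(root - math.sqrt(10)) < 0.01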
# this script reports modified .py files under the desired list of top-level sub-dirs passed as a list of arguments, e.g.:
# python ./utils/get_modified_files.py utils src tests examples
#
# it uses git to find the forking point and which files were modified - i.e. files not under git won't be considered
# since the output of this script is fed into Makefile commands it doesn't print a newline after the results
import re
import subprocess
import sys
fork_point_sha = subprocess.check_output("git merge-base main HEAD".split()).decode("utf-8")
modified_files = (
    subprocess.check_output(f"git diff --diff-filter=d --name-only {fork_point_sha}".split()).decode("utf-8").split()
)
joined_dirs = "|".join(sys.argv[1:])
regex = re.compile(rf"^({joined_dirs}).*?\.py$")

relevant_modified_files = [x for x in modified_files if regex.match(x)]
print(" ".join(relevant_modified_files), end="")
def naive_cut_rod_recursive(n: int, prices: list):
    """Exhaustive recursion: maximum revenue from cutting a rod of length n."""
    _enforce_args(n, prices)
    if n == 0:
        return 0
    max_revue = float("-inf")
    for i in range(1, n + 1):
        max_revue = max(
            max_revue, prices[i - 1] + naive_cut_rod_recursive(n - i, prices)
        )
    return max_revue


def top_down_cut_rod(n: int, prices: list):
    """Memoized (top-down dynamic programming) variant."""
    _enforce_args(n, prices)
    max_rev = [float("-inf") for _ in range(n + 1)]
    return _top_down_cut_rod_recursive(n, prices, max_rev)


def _top_down_cut_rod_recursive(n: int, prices: list, max_rev: list):
    if max_rev[n] >= 0:
        return max_rev[n]
    elif n == 0:
        return 0
    else:
        max_revenue = float("-inf")
        for i in range(1, n + 1):
            max_revenue = max(
                max_revenue,
                prices[i - 1] + _top_down_cut_rod_recursive(n - i, prices, max_rev),
            )
        max_rev[n] = max_revenue
        return max_rev[n]


def bottom_up_cut_rod(n: int, prices: list):
    """Iterative (bottom-up dynamic programming) variant."""
    _enforce_args(n, prices)
    # length(max_rev) = n + 1, to accommodate for the revenue obtainable from a rod of
    # length 0.
    max_rev = [float("-inf") for _ in range(n + 1)]
    max_rev[0] = 0
    for i in range(1, n + 1):
        max_revenue_i = max_rev[i]
        for j in range(1, i + 1):
            max_revenue_i = max(max_revenue_i, prices[j - 1] + max_rev[i - j])
        max_rev[i] = max_revenue_i
    return max_rev[n]


def _enforce_args(n: int, prices: list):
    if n < 0:
        msg = f"n must be greater than or equal to 0. Got n = {n}"
        raise ValueError(msg)
    if n > len(prices):
        msg = (
            "Each integral piece of rod must have a corresponding price. "
            f"Got n = {n} but length of prices = {len(prices)}"
        )
        raise ValueError(msg)


def main():
    prices = [6, 10, 12, 15, 20, 23]
    n = len(prices)
    # the best revenue comes from cutting the rod into 6 pieces, each
    # of length 1 resulting in a revenue of 6 * 6 = 36.
    expected_max_revenue = 36

    max_rev_top_down = top_down_cut_rod(n, prices)
    max_rev_bottom_up = bottom_up_cut_rod(n, prices)
    max_rev_naive = naive_cut_rod_recursive(n, prices)

    assert expected_max_revenue == max_rev_top_down
    assert max_rev_top_down == max_rev_bottom_up
    assert max_rev_bottom_up == max_rev_naive


if __name__ == "__main__":
    main()
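    # Added illustration (classic CLRS price table): the best cut of a length-4 rod
    # with prices [1, 5, 8, 9] is two pieces of length 2, for revenue 5 + 5 = 10.
    assert bottom_up_cut_rod(4, [1, 5, 8, 9]) == 10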
import random
import unittest
import numpy as np
import torch
from diffusers import (
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionUpscalePipeline,
PNDMScheduler,
)
from diffusers.utils import floats_tensor
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class OnnxStableDiffusionUpscalePipelineFastTests(OnnxPipelineTesterMixin, unittest.TestCase):
    # TODO: is there an appropriate internal test set?
    hub_checkpoint = "ssube/stable-diffusion-x4-upscaler-onnx"

    def get_dummy_inputs(self, seed=0):
        image = floats_tensor((1, 3, 128, 128), rng=random.Random(seed))
        generator = torch.manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "generator": generator,
            "num_inference_steps": 3,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs

    def test_pipeline_default_ddpm(self):
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()

        # started as 128, should now be 512
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.6974782, 0.68902093, 0.70135885, 0.7583618, 0.7804545, 0.7854912, 0.78667426, 0.78743863, 0.78070223]
        )
        assert np.abs(image_slice - expected_slice).max() < 1e-1

    def test_pipeline_pndm(self):
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = PNDMScheduler.from_config(pipe.scheduler.config, skip_prk_steps=True)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.6898892, 0.59240556, 0.52499527, 0.58866215, 0.52258235, 0.52572715, 0.62414473, 0.6174387, 0.6214964]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_dpm_multistep(self):
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.7659278, 0.76437664, 0.75579107, 0.7691116, 0.77666986, 0.7727672, 0.7758664, 0.7812226, 0.76942515]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_euler(self):
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.6974782, 0.68902093, 0.70135885, 0.7583618, 0.7804545, 0.7854912, 0.78667426, 0.78743863, 0.78070223]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_euler_ancestral(self):
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.77424496, 0.773601, 0.7645288, 0.7769598, 0.7772739, 0.7738688, 0.78187233, 0.77879584, 0.767043]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1


@nightly
@require_onnxruntime
@require_torch_gpu
class OnnxStableDiffusionUpscalePipelineIntegrationTests(unittest.TestCase):
    @property
    def gpu_provider(self):
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000",  # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )

    @property
    def gpu_options(self):
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options

    def test_inference_default_pndm(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg"
        )
        init_image = init_image.resize((128, 128))
        # using the PNDM scheduler by default
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(
            "ssube/stable-diffusion-x4-upscaler-onnx",
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "A fantasy landscape, trending on artstation"

        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            guidance_scale=7.5,
            num_inference_steps=10,
            generator=generator,
            output_type="np",
        )
        images = output.images
        image_slice = images[0, 255:258, 383:386, -1]

        assert images.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.4883, 0.4947, 0.4980, 0.4975, 0.4982, 0.4980, 0.5000, 0.5006, 0.4972])
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2

    def test_inference_k_lms(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg"
        )
        init_image = init_image.resize((128, 128))
        lms_scheduler = LMSDiscreteScheduler.from_pretrained(
            "ssube/stable-diffusion-x4-upscaler-onnx", subfolder="scheduler"
        )
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(
            "ssube/stable-diffusion-x4-upscaler-onnx",
            scheduler=lms_scheduler,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "A fantasy landscape, trending on artstation"

        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            guidance_scale=7.5,
            num_inference_steps=20,
            generator=generator,
            output_type="np",
        )
        images = output.images
        image_slice = images[0, 255:258, 383:386, -1]

        assert images.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.50173753, 0.50223356, 0.502039, 0.50233036, 0.5023725, 0.5022601, 0.5018758, 0.50234085, 0.50241566]
        )
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2
"""simple docstring"""
import unittest
from parameterized import parameterized
from transformers import LlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaTokenizer
class LlamaModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return LlamaConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = LlamaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_model_as_decoder(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        config.add_cross_attention = True
        model = LlamaModel(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
        )
        result = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
        )
        result = model(input_ids, attention_mask=input_mask)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_causal_lm(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        model = LlamaForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_decoder_model_past_large_inputs(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        config.is_decoder = True
        config.add_cross_attention = True
        model = LlamaForCausalLM(config=config)
        model.to(torch_device)
        model.eval()

        # first forward pass
        outputs = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            use_cache=True,
        )
        past_key_values = outputs.past_key_values

        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_mask = ids_tensor((self.batch_size, 3), vocab_size=2)

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([input_mask, next_mask], dim=-1)

        output_from_no_past = model(
            next_input_ids,
            attention_mask=next_attention_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            output_hidden_states=True,
        )["hidden_states"][0]
        output_from_past = model(
            next_tokens,
            attention_mask=next_attention_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            past_key_values=past_key_values,
            output_hidden_states=True,
        )["hidden_states"][0]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()

        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict


@require_torch
class LlamaModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (LlamaModel, LlamaForCausalLM, LlamaForSequenceClassification) if is_torch_available() else ()
    all_generative_model_classes = (LlamaForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": LlamaModel,
            "text-classification": LlamaForSequenceClassification,
            "text-generation": LlamaForCausalLM,
            "zero-shot": LlamaForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_headmasking = False
    test_pruning = False

    def setUp(self):
        self.model_tester = LlamaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LlamaConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_llama_sequence_classification_model(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = LlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))

    def test_llama_sequence_classification_model_for_single_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = "single_label_classification"
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = LlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))

    def test_llama_sequence_classification_model_for_multi_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = "multi_label_classification"
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor(
            [self.model_tester.batch_size, config.num_labels], self.model_tester.type_sequence_label_size
        ).to(torch.float)
        model = LlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))

    @unittest.skip("LLaMA buffers include complex numbers, which breaks this test")
    def test_save_load_fast_init_from_base(self):
        pass

    @parameterized.expand([("linear",), ("dynamic",)])
    def test_model_rope_scaling(self, scaling_type):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        short_input = ids_tensor([1, 10], config.vocab_size)
        long_input = ids_tensor([1, int(config.max_position_embeddings * 1.5)], config.vocab_size)

        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        original_model = LlamaModel(config)
        original_model.to(torch_device)
        original_model.eval()
        original_short_output = original_model(short_input).last_hidden_state
        original_long_output = original_model(long_input).last_hidden_state

        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        config.rope_scaling = {"type": scaling_type, "factor": 10.0}
        scaled_model = LlamaModel(config)
        scaled_model.to(torch_device)
        scaled_model.eval()
        scaled_short_output = scaled_model(short_input).last_hidden_state
        scaled_long_output = scaled_model(long_input).last_hidden_state

        # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
        # maximum sequence length, so the outputs for the short input should match.
        if scaling_type == "dynamic":
            self.assertTrue(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))
        else:
            self.assertFalse(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))

        # The output should be different for long inputs
        self.assertFalse(torch.allclose(original_long_output, scaled_long_output, atol=1e-5))


@require_torch
class LlamaIntegrationTest(unittest.TestCase):
    @unittest.skip("Logits are not exactly the same, once we fix the instabilities somehow, will update!")
    @slow
    def test_model_7b_logits(self):
        input_ids = [1, 306, 4658, 278, 6593, 310, 2834, 338]
        model = LlamaForCausalLM.from_pretrained("meta-llama/Llama-2-7b-hf", device_map="auto")
        out = model(torch.tensor([input_ids]))
        # Expected mean on dim = -1
        EXPECTED_MEAN = torch.tensor([[-6.6550, -4.1227, -4.9859, -3.2406, 0.8262, -3.0033, 1.2964, -3.3699]])
        torch.testing.assert_close(out.mean(-1), EXPECTED_MEAN, atol=1e-2, rtol=1e-2)
        # slicing logits[0, 0, 0:30]
        # fmt: off
        EXPECTED_SLICE = torch.tensor([-12.8281, -7.4453, -0.4639, -8.0625, -7.2500, -8.0000, -6.4883, -7.7695, -7.8438, -7.0312, -6.2188, -7.1328, -1.8496, 1.9961, -8.6250, -6.7227, -12.8281, -6.9492, -7.0742, -7.7852, -7.5820, -7.9062, -6.9375, -7.9805, -8.3438, -8.1562, -8.0469, -7.6250, -7.7422, -7.3398,])
        # fmt: on
        torch.testing.assert_close(out[0, 0, :30], EXPECTED_SLICE, atol=1e-5, rtol=1e-5)

    @unittest.skip("Logits are not exactly the same, once we fix the instabilities somehow, will update!")
    @slow
    def test_model_13b_logits(self):
        input_ids = [1, 306, 4658, 278, 6593, 310, 2834, 338]
        model = LlamaForCausalLM.from_pretrained("meta-llama/Llama-2-13b-hf", device_map="auto")
        out = model(torch.tensor(input_ids))
        # Expected mean on dim = -1
        EXPECTED_MEAN = torch.tensor([[-2.0622, -1.2794, -1.1638, -0.9788, -1.4603, -1.0238, -1.7893, -1.4411]])
        torch.testing.assert_close(out.mean(-1), EXPECTED_MEAN, atol=1e-2, rtol=1e-2)
        # slicing logits[0, 0, 0:30]
        # fmt: off
        EXPECTED_SLICE = torch.tensor([-8.1406, -8.0547, 2.7461, -1.2344, -0.1448, -1.8262, -1.0020, -1.8154, -1.6895, -1.8516, -2.3574, -0.9277, 3.7598, 6.5742, -1.2998, -0.1177, -8.1406, -2.9688, -2.9199, -3.1699, -3.5254, -2.3555, -2.7988, -3.4141, -2.8262, -4.5195, -3.3379, -3.3164, -2.7832, -3.0273])
        # fmt: on
        torch.testing.assert_close(out[0, 0, :30], EXPECTED_SLICE, atol=1e-5, rtol=1e-5)

    @unittest.skip("Logits are not exactly the same, once we fix the instabilities somehow, will update!")
    @slow
    def test_model_13bf_logits(self):
        input_ids = [1, 306, 4658, 278, 6593, 310, 2834, 338]
        model = LlamaForCausalLM.from_pretrained("meta-llama/Llama-2-13b-chat-hf", device_map="auto")
        out = model(torch.tensor(input_ids))
        # Expected mean on dim = -1
        EXPECTED_MEAN = torch.tensor([[-0.8562, -1.8520, -0.7551, -0.4162, -1.5161, -1.2038, -2.4823, -2.3254]])
        torch.testing.assert_close(out.mean(-1), EXPECTED_MEAN, atol=1e-2, rtol=1e-2)
        # slicing logits[0, 0, 0:30]
        # fmt: off
        EXPECTED_SLICE = torch.tensor([-2.2227, 4.8828, 0.9023, -0.4578, -0.7871, -0.1033, -0.6221, -0.5786, -0.7803, -1.0674, -1.2920, -0.1570, 0.8008, 2.0723, -0.9497, 0.2771, -2.2227, -0.7612, -1.4346, -1.2061, -1.6426, -0.3000, -0.7139, -1.1934, -1.8691, -1.6973, -1.5947, -1.2705, -0.3523, -0.5513])
        # fmt: on
        torch.testing.assert_close(out.mean(-1), EXPECTED_SLICE, atol=1e-2, rtol=1e-2)

    @unittest.skip(
        "Logits are not exactly the same, once we fix the instabilities somehow, will update! Also it is gonna be a `too_slow` test"
    )
    @slow
    def test_model_70b_logits(self):
        input_ids = [1, 306, 4658, 278, 6593, 310, 2834, 338]
        model = LlamaForCausalLM.from_pretrained("meta-llama/Llama-2-70b-hf", device_map="auto")
        out = model(torch.tensor(input_ids))

        EXPECTED_MEAN = torch.tensor(
            [[-4.2327, -3.3360, -4.6665, -4.7631, -1.8180, -3.4170, -1.4211, -3.1810]], dtype=torch.float32
        )
        torch.testing.assert_close(out.mean(-1), EXPECTED_MEAN, atol=1e-2, rtol=1e-2)
        # fmt: off
        EXPECTED_SLICE = torch.tensor([-9.4922, -3.9551, 1.7998, -5.6758, -5.1055, -5.8984, -4.8320, -6.8086, -6.5391, -5.6172, -5.5820, -5.5352, 1.7881, 3.6289, -6.5117, -3.4785, -9.5000, -6.0352, -6.8125, -6.0195, -6.6836, -5.4727, -6.2812, -6.0391, -7.3398, -7.4297, -7.4844, -6.5820, -5.8789, -5.5312])
        # fmt: on
        torch.testing.assert_close(out[0, 0, :30], EXPECTED_SLICE, atol=1e-5, rtol=1e-5)

    @unittest.skip("Model is currently gated")
    @slow
    def test_model_13b_greedy_generation(self):
        EXPECTED_TEXT_COMPLETION = """Simply put, the theory of relativity states that 1) the laws of physics are the same everywhere in the universe and 2) the passage of time and the length of objects can vary depending on the observer's frame of reference.\n\nThe first part of the theory, that the laws of physics are the same everywhere, is known as the "princi"""
        prompt = "Simply put, the theory of relativity states that "
        tokenizer = LlamaTokenizer.from_pretrained("meta-llama/Llama-2-13b-chat-hf")
        input_ids = tokenizer.encode(prompt, return_tensors="pt")
        model = LlamaForCausalLM.from_pretrained(
            "meta-llama/Llama-2-13b-chat-hf", device_map="sequential", use_safetensors=False
        )

        # greedy generation outputs
        generated_ids = model.generate(input_ids, max_new_tokens=64, top_p=None, temperature=1, do_sample=False)
        text = tokenizer.decode(generated_ids[0], skip_special_tokens=True)
        self.assertEqual(EXPECTED_TEXT_COMPLETION, text)
"""simple docstring"""
import cva
import numpy as np
class UpperCamelCase :
"""simple docstring"""
def __init__( self ,UpperCAmelCase_ ,UpperCAmelCase_ ):
if k in (0.04, 0.06):
_lowercase : Optional[Any] = k
_lowercase : Optional[Any] = window_size
else:
raise ValueError("""invalid k value""" )
def __str__( self ):
return str(self.k )
def lowerCamelCase__ ( self ,UpperCAmelCase_ ):
_lowercase : List[str] = cva.imread(UpperCAmelCase_ ,0 )
_lowercase , _lowercase : Dict = img.shape
_lowercase : list[list[int]] = []
_lowercase : int = img.copy()
_lowercase : List[str] = cva.cvtColor(UpperCAmelCase_ ,cva.COLOR_GRAY2RGB )
_lowercase , _lowercase : Optional[Any] = np.gradient(UpperCAmelCase_ )
_lowercase : Optional[int] = dx**2
_lowercase : Optional[Any] = dy**2
_lowercase : Optional[Any] = dx * dy
_lowercase : List[str] = 0.04
_lowercase : Optional[Any] = self.window_size // 2
for y in range(UpperCAmelCase_ ,h - offset ):
for x in range(UpperCAmelCase_ ,w - offset ):
_lowercase : Optional[Any] = ixx[
y - offset : y + offset + 1, x - offset : x + offset + 1
].sum()
_lowercase : Dict = iyy[
y - offset : y + offset + 1, x - offset : x + offset + 1
].sum()
_lowercase : Union[str, Any] = ixy[
y - offset : y + offset + 1, x - offset : x + offset + 1
].sum()
_lowercase : int = (wxx * wyy) - (wxy**2)
_lowercase : Union[str, Any] = wxx + wyy
_lowercase : Union[str, Any] = det - k * (trace**2)
# Can change the value
if r > 0.5:
corner_list.append([x, y, r] )
color_img.itemset((y, x, 0) ,0 )
color_img.itemset((y, x, 1) ,0 )
color_img.itemset((y, x, 2) ,2_55 )
return color_img, corner_list
if __name__ == "__main__":
UpperCAmelCase: Optional[int] = HarrisCorner(0.04, 3)
UpperCAmelCase , UpperCAmelCase: List[Any] = edge_detect.detect("""path_to_image""")
cva.imwrite("""detect.png""", color_img)
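
# Background note (added for context, not in the original file): for each window W the
# code above accumulates the structure tensor
#     M = [[sum(Ix^2), sum(Ix*Iy)], [sum(Ix*Iy), sum(Iy^2)]]
# and scores it with the Harris response R = det(M) - k * trace(M)^2, flagging a corner
# wherever R exceeds the (hard-coded) threshold of 0.5.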
import io
import json
import unittest
from parameterized import parameterized
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
from transformers.testing_utils import get_tests_dir, require_torch, slow, torch_device
from utils import calculate_bleu
filename = get_tests_dir() + "/test_data/fsmt/fsmt_val_data.json"
with io.open(filename, "r", encoding="utf-8") as f:
    bleu_data = json.load(f)


@require_torch
class ModelEvalTester(unittest.TestCase):
    def get_tokenizer(self, mname):
        return FSMTTokenizer.from_pretrained(mname)

    def get_model(self, mname):
        model = FSMTForConditionalGeneration.from_pretrained(mname).to(torch_device)
        if torch_device == "cuda":
            model.half()
        return model

    @parameterized.expand(
        [
            ["en-ru", 26.0],
            ["ru-en", 22.0],
            ["en-de", 22.0],
            ["de-en", 29.0],
        ]
    )
    @slow
    def test_bleu_scores(self, pair, min_bleu_score):
        mname = f"facebook/wmt19-{pair}"
        tokenizer = self.get_tokenizer(mname)
        model = self.get_model(mname)

        src_sentences = bleu_data[pair]["src"]
        tgt_sentences = bleu_data[pair]["tgt"]

        batch = tokenizer(src_sentences, return_tensors="pt", truncation=True, padding="longest").to(torch_device)
        outputs = model.generate(
            input_ids=batch.input_ids,
            num_beams=8,
        )
        decoded_sentences = tokenizer.batch_decode(
            outputs, skip_special_tokens=True, clean_up_tokenization_spaces=False
        )
        scores = calculate_bleu(decoded_sentences, tgt_sentences)
        print(scores)
        self.assertGreaterEqual(scores["bleu"], min_bleu_score)
import json
import os
import unittest
from transformers import BatchEncoding, MvpTokenizer, MvpTokenizerFast
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin, filter_roberta_detectors
@require_tokenizers
class TestTokenizationMvp(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = MvpTokenizer
    rust_tokenizer_class = MvpTokenizerFast
    test_rust_tokenizer = True
    from_pretrained_filter = filter_roberta_detectors

    def setUp(self):
        super().setUp()
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "\u0120",
            "\u0120l",
            "\u0120n",
            "\u0120lo",
            "\u0120low",
            "er",
            "\u0120lowest",
            "\u0120newer",
            "\u0120wider",
            "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.rust_tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        return "lower newer", "lower newer"

    @cached_property
    def default_tokenizer(self):
        return MvpTokenizer.from_pretrained("RUCAIBox/mvp")

    @cached_property
    def default_tokenizer_fast(self):
        return MvpTokenizerFast.from_pretrained("RUCAIBox/mvp")

    @require_torch
    def test_prepare_batch(self):
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        expected_src_tokens = [0, 250, 251, 17818, 13, 39186, 1938, 4, 2]

        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(src_text, max_length=len(expected_src_tokens), padding=True, return_tensors="pt")
            self.assertIsInstance(batch, BatchEncoding)

            self.assertEqual((2, 9), batch.input_ids.shape)
            self.assertEqual((2, 9), batch.attention_mask.shape)
            result = batch.input_ids.tolist()[0]
            self.assertListEqual(expected_src_tokens, result)
            # Test that special tokens are reset

    @require_torch
    def test_prepare_batch_empty_target_text(self):
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(src_text, padding=True, return_tensors="pt")
            # check if input_ids are returned and no labels
            self.assertIn("input_ids", batch)
            self.assertIn("attention_mask", batch)
            self.assertNotIn("labels", batch)
            self.assertNotIn("decoder_attention_mask", batch)

    @require_torch
    def test_tokenizer_as_target_length(self):
        tgt_text = [
            "Summary of the text.",
            "Another summary.",
        ]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            targets = tokenizer(text_target=tgt_text, max_length=32, padding="max_length", return_tensors="pt")
            self.assertEqual(32, targets["input_ids"].shape[1])

    @require_torch
    def test_prepare_batch_not_longer_than_maxlen(self):
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(
                ["I am a small frog" * 1024, "I am a small frog"], padding=True, truncation=True, return_tensors="pt"
            )
            self.assertIsInstance(batch, BatchEncoding)
            self.assertEqual(batch.input_ids.shape, (2, 1024))

    @require_torch
    def test_special_tokens(self):
        src_text = ["A long paragraph for summarization."]
        tgt_text = [
            "Summary of the text.",
        ]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            inputs = tokenizer(src_text, text_target=tgt_text, return_tensors="pt")
            input_ids = inputs["input_ids"]
            labels = inputs["labels"]
            self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item())
            self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item())
            self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item())
            self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item())

    def test_pretokenized_inputs(self):
        pass

    def test_embeded_special_tokens(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
lowerCAmelCase__ : str = self.rust_tokenizer_class.from_pretrained(a , **a )
lowerCAmelCase__ : List[str] = self.tokenizer_class.from_pretrained(a , **a )
lowerCAmelCase__ : Optional[int] = 'A, <mask> AllenNLP sentence.'
lowerCAmelCase__ : int = tokenizer_r.encode_plus(a , add_special_tokens=a , return_token_type_ids=a )
lowerCAmelCase__ : Optional[int] = tokenizer_p.encode_plus(a , add_special_tokens=a , return_token_type_ids=a )
# token_type_ids should put 0 everywhere
self.assertEqual(sum(tokens_r['token_type_ids'] ) , sum(tokens_p['token_type_ids'] ) )
# attention_mask should put 1 everywhere, so sum over length should be 1
self.assertEqual(
sum(tokens_r['attention_mask'] ) / len(tokens_r['attention_mask'] ) , sum(tokens_p['attention_mask'] ) / len(tokens_p['attention_mask'] ) , )
lowerCAmelCase__ : List[Any] = tokenizer_r.convert_ids_to_tokens(tokens_r['input_ids'] )
lowerCAmelCase__ : Dict = tokenizer_p.convert_ids_to_tokens(tokens_p['input_ids'] )
# Rust correctly handles the space before the mask while python doesnt
self.assertSequenceEqual(tokens_p['input_ids'] , [0, 250, 6, 50_264, 3_823, 487, 21_992, 3_645, 4, 2] )
self.assertSequenceEqual(tokens_r['input_ids'] , [0, 250, 6, 50_264, 3_823, 487, 21_992, 3_645, 4, 2] )
self.assertSequenceEqual(
a , ['<s>', 'A', ',', '<mask>', 'ĠAllen', 'N', 'LP', 'Ġsentence', '.', '</s>'] )
self.assertSequenceEqual(
a , ['<s>', 'A', ',', '<mask>', 'ĠAllen', 'N', 'LP', 'Ġsentence', '.', '</s>'] )
| 212
| 1
|
"""simple docstring"""
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.utils import ComputeEnvironment
from .cluster import get_cluster_input
from .config_args import cache_dir, default_config_file, default_yaml_config_file, load_config_from_file # noqa: F401
from .config_utils import _ask_field, _ask_options, _convert_compute_environment # noqa: F401
from .sagemaker import get_sagemaker_input
_a = 'Launches a series of prompts to create and save a `default_config.yaml` configuration file for your training system. Should always be ran first on your machine'
def _A ( ) -> Tuple:
'''simple docstring'''
__lowercase = _ask_options(
"In which compute environment are you running?", ["This machine", "AWS (Amazon SageMaker)"], _convert_compute_environment, )
if compute_environment == ComputeEnvironment.AMAZON_SAGEMAKER:
__lowercase = get_sagemaker_input()
else:
__lowercase = get_cluster_input()
return config
def _A ( UpperCamelCase_ : Union[str, Any]=None) -> Union[str, Any]:
'''simple docstring'''
if subparsers is not None:
__lowercase = subparsers.add_parser("config", description=UpperCamelCase_)
else:
__lowercase = argparse.ArgumentParser("Accelerate config command", description=UpperCamelCase_)
parser.add_argument(
"--config_file", default=UpperCamelCase_, help=(
"The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
"location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
"such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
"with 'huggingface'."
), )
if subparsers is not None:
parser.set_defaults(func=UpperCamelCase_)
return parser
def _A ( UpperCamelCase_ : Dict) -> str:
'''simple docstring'''
__lowercase = get_user_input()
if args.config_file is not None:
__lowercase = args.config_file
else:
if not os.path.isdir(UpperCamelCase_):
os.makedirs(UpperCamelCase_)
__lowercase = default_yaml_config_file
if config_file.endswith(".json"):
config.to_json_file(UpperCamelCase_)
else:
config.to_yaml_file(UpperCamelCase_)
print(F"""accelerate configuration saved at {config_file}""")
def _A ( ) -> Optional[Any]:
'''simple docstring'''
__lowercase = config_command_parser()
__lowercase = parser.parse_args()
config_command(UpperCamelCase_)
if __name__ == "__main__":
main()
| 144
|
"""simple docstring"""
import fire
from transformers import AutoConfig, AutoModelForSeqaSeqLM, AutoTokenizer
def _A ( UpperCamelCase_ : str, UpperCamelCase_ : str, **UpperCamelCase_ : Optional[int]) -> Tuple:
'''simple docstring'''
__lowercase = AutoConfig.from_pretrained(UpperCamelCase_, **UpperCamelCase_)
__lowercase = AutoModelForSeqaSeqLM.from_config(UpperCamelCase_)
model.save_pretrained(UpperCamelCase_)
AutoTokenizer.from_pretrained(UpperCamelCase_).save_pretrained(UpperCamelCase_)
return model
if __name__ == "__main__":
fire.Fire(save_randomly_initialized_version)
| 144
| 1
|
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyInpaintPipeline, KandinskyPriorPipeline, UNetaDConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class lowerCamelCase_ ( __a , unittest.TestCase ):
lowerCAmelCase__ = KandinskyInpaintPipeline
lowerCAmelCase__ = ['prompt', 'image_embeds', 'negative_image_embeds', 'image', 'mask_image']
lowerCAmelCase__ = [
'prompt',
'negative_prompt',
'image_embeds',
'negative_image_embeds',
'image',
'mask_image',
]
lowerCAmelCase__ = [
'generator',
'height',
'width',
'latents',
'guidance_scale',
'negative_prompt',
'num_inference_steps',
'return_dict',
'guidance_scale',
'num_images_per_prompt',
'output_type',
'return_dict',
]
lowerCAmelCase__ = False
@property
def lowercase_ ( self : Any ):
'''simple docstring'''
return 32
@property
def lowercase_ ( self : List[Any] ):
'''simple docstring'''
return 32
@property
def lowercase_ ( self : Optional[Any] ):
'''simple docstring'''
return self.time_input_dim
@property
def lowercase_ ( self : Tuple ):
'''simple docstring'''
return self.time_input_dim * 4
@property
def lowercase_ ( self : Optional[Any] ):
'''simple docstring'''
return 100
@property
def lowercase_ ( self : List[str] ):
'''simple docstring'''
UpperCAmelCase__ : str = XLMRobertaTokenizerFast.from_pretrained('''YiYiXu/tiny-random-mclip-base''' )
return tokenizer
@property
def lowercase_ ( self : Optional[int] ):
'''simple docstring'''
torch.manual_seed(0 )
UpperCAmelCase__ : Dict = MCLIPConfig(
numDims=self.cross_attention_dim , transformerDimensions=self.text_embedder_hidden_size , hidden_size=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=1_005 , )
UpperCAmelCase__ : int = MultilingualCLIP(_A )
UpperCAmelCase__ : Union[str, Any] = text_encoder.eval()
return text_encoder
@property
def lowercase_ ( self : Any ):
'''simple docstring'''
torch.manual_seed(0 )
UpperCAmelCase__ : List[str] = {
'''in_channels''': 9,
# Out channels is double in channels because predicts mean and variance
'''out_channels''': 8,
'''addition_embed_type''': '''text_image''',
'''down_block_types''': ('''ResnetDownsampleBlock2D''', '''SimpleCrossAttnDownBlock2D'''),
'''up_block_types''': ('''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''),
'''mid_block_type''': '''UNetMidBlock2DSimpleCrossAttn''',
'''block_out_channels''': (self.block_out_channels_a, self.block_out_channels_a * 2),
'''layers_per_block''': 1,
'''encoder_hid_dim''': self.text_embedder_hidden_size,
'''encoder_hid_dim_type''': '''text_image_proj''',
'''cross_attention_dim''': self.cross_attention_dim,
'''attention_head_dim''': 4,
'''resnet_time_scale_shift''': '''scale_shift''',
'''class_embed_type''': None,
}
UpperCAmelCase__ : Optional[int] = UNetaDConditionModel(**_A )
return model
@property
def lowercase_ ( self : Tuple ):
'''simple docstring'''
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def lowercase_ ( self : Optional[int] ):
'''simple docstring'''
torch.manual_seed(0 )
UpperCAmelCase__ : Tuple = VQModel(**self.dummy_movq_kwargs )
return model
def lowercase_ ( self : List[str] ):
'''simple docstring'''
UpperCAmelCase__ : Union[str, Any] = self.dummy_text_encoder
UpperCAmelCase__ : List[Any] = self.dummy_tokenizer
UpperCAmelCase__ : List[str] = self.dummy_unet
UpperCAmelCase__ : int = self.dummy_movq
UpperCAmelCase__ : str = DDIMScheduler(
num_train_timesteps=1_000 , beta_schedule='''linear''' , beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , clip_sample=_A , set_alpha_to_one=_A , steps_offset=1 , prediction_type='''epsilon''' , thresholding=_A , )
UpperCAmelCase__ : List[str] = {
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''unet''': unet,
'''scheduler''': scheduler,
'''movq''': movq,
}
return components
def lowercase_ ( self : List[Any] , _A : Any , _A : Union[str, Any]=0 ):
'''simple docstring'''
UpperCAmelCase__ : Optional[int] = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(_A ) ).to(_A )
UpperCAmelCase__ : Dict = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(seed + 1 ) ).to(_A )
# create init_image
UpperCAmelCase__ : Optional[Any] = floats_tensor((1, 3, 64, 64) , rng=random.Random(_A ) ).to(_A )
UpperCAmelCase__ : Optional[Any] = image.cpu().permute(0 , 2 , 3 , 1 )[0]
UpperCAmelCase__ : List[Any] = Image.fromarray(np.uinta(_A ) ).convert('''RGB''' ).resize((256, 256) )
# create mask
UpperCAmelCase__ : Union[str, Any] = np.ones((64, 64) , dtype=np.floataa )
UpperCAmelCase__ : Optional[Any] = 0
if str(_A ).startswith('''mps''' ):
UpperCAmelCase__ : str = torch.manual_seed(_A )
else:
UpperCAmelCase__ : Dict = torch.Generator(device=_A ).manual_seed(_A )
UpperCAmelCase__ : Any = {
'''prompt''': '''horse''',
'''image''': init_image,
'''mask_image''': mask,
'''image_embeds''': image_embeds,
'''negative_image_embeds''': negative_image_embeds,
'''generator''': generator,
'''height''': 64,
'''width''': 64,
'''num_inference_steps''': 2,
'''guidance_scale''': 4.0,
'''output_type''': '''np''',
}
return inputs
def lowercase_ ( self : List[str] ):
'''simple docstring'''
UpperCAmelCase__ : Any = '''cpu'''
UpperCAmelCase__ : str = self.get_dummy_components()
UpperCAmelCase__ : Dict = self.pipeline_class(**_A )
UpperCAmelCase__ : List[Any] = pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
UpperCAmelCase__ : str = pipe(**self.get_dummy_inputs(_A ) )
UpperCAmelCase__ : List[Any] = output.images
UpperCAmelCase__ : Optional[Any] = pipe(
**self.get_dummy_inputs(_A ) , return_dict=_A , )[0]
UpperCAmelCase__ : Tuple = image[0, -3:, -3:, -1]
UpperCAmelCase__ : Dict = image_from_tuple[0, -3:, -3:, -1]
print(f"""image.shape {image.shape}""" )
assert image.shape == (1, 64, 64, 3)
UpperCAmelCase__ : List[Any] = np.array(
[0.8_3_2_6_9_1_9, 0.7_3_7_9_0_4_6_7, 0.2_0_9_1_8_5_8_1, 0.9_3_0_9_6_1_2, 0.5_5_1_1_7_9_1, 0.4_3_7_1_3_3_2_8, 0.5_5_1_3_3_2_1, 0.4_9_9_2_2_9_3_4, 0.5_9_4_9_7_7_8_6] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
), f""" expected_slice {expected_slice}, but got {image_slice.flatten()}"""
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
), f""" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"""
def lowercase_ ( self : Tuple ):
'''simple docstring'''
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
@slow
@require_torch_gpu
class lowerCamelCase_ ( unittest.TestCase ):
def lowercase_ ( self : Any ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowercase_ ( self : Optional[Any] ):
'''simple docstring'''
UpperCAmelCase__ : str = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/kandinsky/kandinsky_inpaint_cat_with_hat_fp16.npy''' )
UpperCAmelCase__ : Union[str, Any] = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/kandinsky/cat.png''' )
UpperCAmelCase__ : Optional[Any] = np.ones((768, 768) , dtype=np.floataa )
UpperCAmelCase__ : Optional[int] = 0
UpperCAmelCase__ : Union[str, Any] = '''a hat'''
UpperCAmelCase__ : Optional[Any] = KandinskyPriorPipeline.from_pretrained(
'''kandinsky-community/kandinsky-2-1-prior''' , torch_dtype=torch.floataa )
pipe_prior.to(_A )
UpperCAmelCase__ : str = KandinskyInpaintPipeline.from_pretrained(
'''kandinsky-community/kandinsky-2-1-inpaint''' , torch_dtype=torch.floataa )
UpperCAmelCase__ : List[str] = pipeline.to(_A )
pipeline.set_progress_bar_config(disable=_A )
UpperCAmelCase__ : str = torch.Generator(device='''cpu''' ).manual_seed(0 )
UpperCAmelCase__ , UpperCAmelCase__ : Dict = pipe_prior(
_A , generator=_A , num_inference_steps=5 , negative_prompt='''''' , ).to_tuple()
UpperCAmelCase__ : Any = pipeline(
_A , image=_A , mask_image=_A , image_embeds=_A , negative_image_embeds=_A , generator=_A , num_inference_steps=100 , height=768 , width=768 , output_type='''np''' , )
UpperCAmelCase__ : List[Any] = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(_A , _A )
| 181
|
'''simple docstring'''
def sum_digits(num: int) -> int:
    digit_sum = 0
    while num > 0:
        digit_sum += num % 10
        num //= 10
    return digit_sum


def solution(max_n: int = 100) -> int:
    # Numerators of the continued-fraction convergents of e:
    # p(i) = a(i) * p(i-1) + p(i-2), where a(i) = 2*i/3 when i % 3 == 0, else 1.
    pre_numerator = 1
    cur_numerator = 2
    for i in range(2, max_n + 1):
        temp = pre_numerator
        e_cont = 2 * i // 3 if i % 3 == 0 else 1
        pre_numerator = cur_numerator
        cur_numerator = e_cont * pre_numerator + temp
    return sum_digits(cur_numerator)


if __name__ == "__main__":
    print(f"{solution() = }")
| 181
| 1
|
import numpy as np
from numpy import ndarray
from scipy.optimize import Bounds, LinearConstraint, minimize
def norm_squared(vector: ndarray) -> float:
    """Return the squared Euclidean norm of a vector."""
    return np.dot(vector, vector)


class SVC:
    """Support vector classifier fitted by solving Wolfe's dual problem."""

    def __init__(
        self,
        *,
        regularization: float = np.inf,
        kernel: str = "linear",
        gamma: float = 0.0,
    ) -> None:
        self.regularization = regularization
        self.gamma = gamma
        if kernel == "linear":
            self.kernel = self.__linear
        elif kernel == "rbf":
            if self.gamma == 0:
                raise ValueError("rbf kernel requires gamma")
            if not isinstance(self.gamma, (float, int)):
                raise ValueError("gamma must be float or int")
            if not self.gamma > 0:
                raise ValueError("gamma must be > 0")
            self.kernel = self.__rbf
            # in the future, there could be a default value like in sklearn
            # sklearn: def_gamma = 1/(n_features * X.var()) (wiki)
            # previously it was 1/(n_features)
        else:
            raise ValueError(f"Unknown kernel: {kernel}")

    def __linear(self, vector1: ndarray, vector2: ndarray) -> float:
        return np.dot(vector1, vector2)

    def __rbf(self, vector1: ndarray, vector2: ndarray) -> float:
        return np.exp(-(self.gamma * norm_squared(vector1 - vector2)))

    def fit(self, observations: list, classes: ndarray) -> None:
        self.observations = observations
        self.classes = classes

        # using Wolfe's Dual to calculate w.
        # Primal problem: minimize 1/2*norm_squared(w)
        #   constraint: yn(w . xn + b) >= 1
        #
        # With l a vector
        # Dual problem: maximize sum_n(ln) -
        #       1/2 * sum_n(sum_m(ln*lm*yn*ym*xn . xm))
        #   constraint: self.C >= ln >= 0
        #           and sum_n(ln*yn) = 0
        # Then we get w using w = sum_n(ln*yn*xn)
        # At the end we can get b ~= mean(yn - w . xn)
        #
        # Since we use kernels, we only need l_star to calculate b
        # and to classify observations
        (n,) = np.shape(classes)

        def to_minimize(candidate: ndarray) -> float:
            s = 0
            (n,) = np.shape(candidate)
            for i in range(n):
                for j in range(n):
                    s += (
                        candidate[i]
                        * candidate[j]
                        * classes[i]
                        * classes[j]
                        * self.kernel(observations[i], observations[j])
                    )
            return 1 / 2 * s - sum(candidate)

        ly_contraint = LinearConstraint(classes, 0, 0)
        l_bounds = Bounds(0, self.regularization)
        l_star = minimize(
            to_minimize, np.ones(n), bounds=l_bounds, constraints=[ly_contraint]
        ).x
        self.optimum = l_star

        # calculating mean offset of separation plane to points
        s = 0
        for i in range(n):
            for j in range(n):
                s += classes[i] - classes[i] * self.optimum[i] * self.kernel(
                    observations[i], observations[j]
                )
        self.offset = s / n

    def predict(self, observation: ndarray) -> int:
        s = sum(
            self.optimum[n]
            * self.classes[n]
            * self.kernel(self.observations[n], observation)
            for n in range(len(self.classes))
        )
        return 1 if s + self.offset >= 0 else -1


if __name__ == "__main__":
    import doctest

    doctest.testmod()
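    # Usage sketch (toy data assumed for illustration, not part of the original
    # module): two 1-D points on opposite sides of the origin are linearly
    # separable, so a linear-kernel SVC recovers their labels exactly.
    svc = SVC(kernel="linear", regularization=10.0)
    svc.fit([np.array([-1.0]), np.array([1.0])], np.array([-1, 1]))
    assert svc.predict(np.array([2.0])) == 1
    assert svc.predict(np.array([-2.0])) == -1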
| 36
|
import json
import os
import unittest
from transformers import DebertaTokenizer, DebertaTokenizerFast
from transformers.models.deberta.tokenization_deberta import VOCAB_FILES_NAMES
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class UpperCAmelCase_ ( SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
'''simple docstring'''
A : Union[str, Any] = DebertaTokenizer
A : List[Any] = True
A : Dict = DebertaTokenizerFast
def _lowerCAmelCase ( self ) -> Tuple:
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
snake_case_ : Union[str, Any] = [
"l",
"o",
"w",
"e",
"r",
"s",
"t",
"i",
"d",
"n",
"\u0120",
"\u0120l",
"\u0120n",
"\u0120lo",
"\u0120low",
"er",
"\u0120lowest",
"\u0120newer",
"\u0120wider",
"[UNK]",
]
snake_case_ : Optional[Any] = dict(zip(_SCREAMING_SNAKE_CASE , range(len(_SCREAMING_SNAKE_CASE ) ) ) )
snake_case_ : Dict = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
snake_case_ : Optional[int] = {"unk_token": "[UNK]"}
snake_case_ : List[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
snake_case_ : Optional[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
fp.write(json.dumps(_SCREAMING_SNAKE_CASE ) + "\n" )
with open(self.merges_file , "w" , encoding="utf-8" ) as fp:
fp.write("\n".join(_SCREAMING_SNAKE_CASE ) )
def _lowerCAmelCase ( self , **_SCREAMING_SNAKE_CASE ) -> int:
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname , **_SCREAMING_SNAKE_CASE )
def _lowerCAmelCase ( self , _SCREAMING_SNAKE_CASE ) -> Optional[int]:
snake_case_ : int = "lower newer"
snake_case_ : Dict = "lower newer"
return input_text, output_text
def _lowerCAmelCase ( self ) -> str:
snake_case_ : Tuple = self.get_tokenizer()
snake_case_ : str = "lower newer"
snake_case_ : int = ["l", "o", "w", "er", "\u0120", "n", "e", "w", "er"]
snake_case_ : str = tokenizer.tokenize(_SCREAMING_SNAKE_CASE )
self.assertListEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
snake_case_ : Dict = tokens + [tokenizer.unk_token]
snake_case_ : int = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE )
def _lowerCAmelCase ( self ) -> Optional[Any]:
snake_case_ : List[str] = self.get_tokenizer()
snake_case_ : str = tokenizer("Hello" , "World" )
snake_case_ : List[Any] = [0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1]
self.assertListEqual(tokd["token_type_ids"] , _SCREAMING_SNAKE_CASE )
@slow
def _lowerCAmelCase ( self ) -> List[Any]:
snake_case_ : str = self.tokenizer_class.from_pretrained("microsoft/deberta-base" )
snake_case_ : Optional[int] = tokenizer.encode("sequence builders" , add_special_tokens=_SCREAMING_SNAKE_CASE )
snake_case_ : int = tokenizer.encode("multi-sequence build" , add_special_tokens=_SCREAMING_SNAKE_CASE )
snake_case_ : Optional[int] = tokenizer.encode(
"sequence builders" , add_special_tokens=_SCREAMING_SNAKE_CASE , add_prefix_space=_SCREAMING_SNAKE_CASE )
snake_case_ : str = tokenizer.encode(
"sequence builders" , "multi-sequence build" , add_special_tokens=_SCREAMING_SNAKE_CASE , add_prefix_space=_SCREAMING_SNAKE_CASE )
snake_case_ : str = tokenizer.build_inputs_with_special_tokens(_SCREAMING_SNAKE_CASE )
snake_case_ : List[str] = tokenizer.build_inputs_with_special_tokens(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
assert encoded_sentence == encoded_text_from_decode
assert encoded_pair == encoded_pair_from_decode
@slow
def _lowerCAmelCase ( self ) -> Tuple:
snake_case_ : Optional[Any] = [self.tokenizer_class]
if self.test_rust_tokenizer:
tokenizer_classes.append(self.rust_tokenizer_class )
for tokenizer_class in tokenizer_classes:
snake_case_ : str = tokenizer_class.from_pretrained("microsoft/deberta-base" )
snake_case_ : int = [
"ALBERT: A Lite BERT for Self-supervised Learning of Language Representations",
"ALBERT incorporates two parameter reduction techniques",
"The first one is a factorized embedding parameterization. By decomposing the large vocabulary"
" embedding matrix into two small matrices, we separate the size of the hidden layers from the size of"
" vocabulary embedding.",
]
snake_case_ : Any = tokenizer(_SCREAMING_SNAKE_CASE , padding=_SCREAMING_SNAKE_CASE )
snake_case_ : Union[str, Any] = [tokenizer.decode(_SCREAMING_SNAKE_CASE , skip_special_tokens=_SCREAMING_SNAKE_CASE ) for seq in encoding["input_ids"]]
# fmt: off
snake_case_ : List[Any] = {
"input_ids": [
[1, 2118, 1_1126, 565, 35, 83, 2_5191, 163, 1_8854, 13, 1_2156, 12, 1_6101, 2_5376, 1_3807, 9, 2_2205, 2_7893, 1635, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 2118, 1_1126, 565, 2_4536, 80, 4_3797, 4878, 7373, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 133, 78, 65, 16, 10, 3724, 1538, 3_3183, 1_1303, 4_3797, 1938, 4, 870, 2_4165, 2_9105, 5, 739, 3_2644, 3_3183, 1_1303, 3_6173, 88, 80, 650, 7821, 4_5940, 6, 52, 2559, 5, 1836, 9, 5, 7397, 1_3171, 31, 5, 1836, 9, 3_2644, 3_3183, 1_1303, 4, 2]
],
"token_type_ids": [
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
],
"attention_mask": [
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
]
}
# fmt: on
snake_case_ : List[str] = [
"ALBERT: A Lite BERT for Self-supervised Learning of Language Representations",
"ALBERT incorporates two parameter reduction techniques",
"The first one is a factorized embedding parameterization. By decomposing the large vocabulary"
" embedding matrix into two small matrices, we separate the size of the hidden layers from the size of"
" vocabulary embedding.",
]
self.assertDictEqual(encoding.data , _SCREAMING_SNAKE_CASE )
for expected, decoded in zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
self.assertEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
| 36
| 1
|
import itertools
import os
from collections import Counter, defaultdict
from concurrent.futures import ThreadPoolExecutor, as_completed
import numpy as np
import datasets
from .execute import check_correctness
__lowerCAmelCase : Tuple = '\\n@misc{chen2021evaluating,\n title={Evaluating Large Language Models Trained on Code},\n author={Mark Chen and Jerry Tworek and Heewoo Jun and Qiming Yuan \\nand Henrique Ponde de Oliveira Pinto and Jared Kaplan and Harri Edwards \\nand Yuri Burda and Nicholas Joseph and Greg Brockman and Alex Ray \\nand Raul Puri and Gretchen Krueger and Michael Petrov and Heidy Khlaaf \\nand Girish Sastry and Pamela Mishkin and Brooke Chan and Scott Gray \\nand Nick Ryder and Mikhail Pavlov and Alethea Power and Lukasz Kaiser \\nand Mohammad Bavarian and Clemens Winter and Philippe Tillet \\nand Felipe Petroski Such and Dave Cummings and Matthias Plappert \\nand Fotios Chantzis and Elizabeth Barnes and Ariel Herbert-Voss \\nand William Hebgen Guss and Alex Nichol and Alex Paino and Nikolas Tezak \\nand Jie Tang and Igor Babuschkin and Suchir Balaji and Shantanu Jain \\nand William Saunders and Christopher Hesse and Andrew N. Carr \\nand Jan Leike and Josh Achiam and Vedant Misra and Evan Morikawa \\nand Alec Radford and Matthew Knight and Miles Brundage and Mira Murati \\nand Katie Mayer and Peter Welinder and Bob McGrew and Dario Amodei \\nand Sam McCandlish and Ilya Sutskever and Wojciech Zaremba},\n year={2021},\n eprint={2107.03374},\n archivePrefix={arXiv},\n primaryClass={cs.LG}\n}\n'
__lowerCAmelCase : str = '\\nThis metric implements the evaluation harness for the HumanEval problem solving dataset\ndescribed in the paper "Evaluating Large Language Models Trained on Code"\n(https://arxiv.org/abs/2107.03374).\n'
__lowerCAmelCase : Optional[int] = '\nCalculates how good are predictions given some references, using certain scores\nArgs:\n predictions: list of candidates to evaluate. Each candidates should be a list\n of strings with several code candidates to solve the problem.\n references: a list with a test for each prediction. Each test should evaluate the\n correctness of a code candidate.\n k: number of code candidates to consider in the evaluation (Default: [1, 10, 100])\n num_workers: number of workers used to evaluate the canidate programs (Default: 4).\n timeout:\nReturns:\n pass_at_k: dict with pass rates for each k\n results: dict with granular results of each unittest\nExamples:\n >>> code_eval = datasets.load_metric("code_eval")\n >>> test_cases = ["assert add(2,3)==5"]\n >>> candidates = [["def add(a,b): return a*b", "def add(a, b): return a+b"]]\n >>> pass_at_k, results = code_eval.compute(references=test_cases, predictions=candidates, k=[1, 2])\n >>> print(pass_at_k)\n {\'pass@1\': 0.5, \'pass@2\': 1.0}\n'
__lowerCAmelCase : Dict = '\n################################################################################\n !!!WARNING!!!\n################################################################################\nThe "code_eval" metric executes untrusted model-generated code in Python.\nAlthough it is highly unlikely that model-generated code will do something\novertly malicious in response to this test suite, model-generated code may act\ndestructively due to a lack of model capability or alignment.\nUsers are strongly encouraged to sandbox this evaluation suite so that it\ndoes not perform destructive actions on their host or network. For more\ninformation on how OpenAI sandboxes its code, see the paper "Evaluating Large\nLanguage Models Trained on Code" (https://arxiv.org/abs/2107.03374).\n\nOnce you have read this disclaimer and taken appropriate precautions,\nset the environment variable HF_ALLOW_CODE_EVAL="1". Within Python you can to this\nwith:\n\n>>> import os\n>>> os.environ["HF_ALLOW_CODE_EVAL"] = "1"\n\n################################################################################\\n'
__lowerCAmelCase : Union[str, Any] = 'The MIT License\n\nCopyright (c) OpenAI (https://openai.com)\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the "Software"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in\nall copies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\nTHE SOFTWARE.'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class UpperCAmelCase_ ( datasets.Metric ):
'''simple docstring'''
    def _info(self):
        return datasets.MetricInfo(
            # This is the description that will appear on the metrics page.
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Sequence(datasets.Value("string")),
                    "references": datasets.Value("string"),
                }
            ),
            homepage="https://github.com/openai/human-eval",
            codebase_urls=["https://github.com/openai/human-eval"],
            reference_urls=["https://github.com/openai/human-eval"],
            license=_LICENSE,
        )

    def _compute(self, predictions, references, k=[1, 10, 100], num_workers=4, timeout=3.0):
        """Returns the scores"""

        if os.getenv("HF_ALLOW_CODE_EVAL", 0) != "1":
            raise ValueError(_WARNING)

        if os.name == "nt":
            raise NotImplementedError("This metric is currently not supported on Windows.")

        with ThreadPoolExecutor(max_workers=num_workers) as executor:
            futures = []
            completion_id = Counter()
            n_samples = 0
            results = defaultdict(list)

            for task_id, (candidates, test_case) in enumerate(zip(predictions, references)):
                for candidate in candidates:
                    test_program = candidate + "\n" + test_case
                    args = (test_program, timeout, task_id, completion_id[task_id])
                    future = executor.submit(check_correctness, *args)
                    futures.append(future)
                    completion_id[task_id] += 1
                    n_samples += 1

            for future in as_completed(futures):
                result = future.result()
                results[result["task_id"]].append((result["completion_id"], result))

        total, correct = [], []
        for result in results.values():
            result.sort()
            passed = [r[1]["passed"] for r in result]
            total.append(len(passed))
            correct.append(sum(passed))
        total = np.array(total)
        correct = np.array(correct)

        ks = k
        pass_at_k = {f"pass@{k}": estimate_pass_at_k(total, correct, k).mean() for k in ks if (total >= k).all()}

        return pass_at_k, results


def estimate_pass_at_k(num_samples, num_correct, k):
    """Estimates pass@k of each problem and returns them in an array."""

    def estimator(n: int, c: int, k: int) -> float:
        """Calculates 1 - comb(n - c, k) / comb(n, k)."""
        if n - c < k:
            return 1.0
        return 1.0 - np.prod(1.0 - k / np.arange(n - c + 1, n + 1))

    if isinstance(num_samples, int):
        num_samples_it = itertools.repeat(num_samples, len(num_correct))
    else:
        assert len(num_samples) == len(num_correct)
        num_samples_it = iter(num_samples)

    return np.array([estimator(int(n), int(c), k) for n, c in zip(num_samples_it, num_correct)])
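
# Worked check of the estimator (hand-computed, matching the docstring example
# above): with n = 2 candidates and c = 1 passing, pass@1 = 1 - C(1,1)/C(2,1)
# = 0.5, and pass@2 = 1.0 because k > n - c short-circuits the estimator.
if __name__ == "__main__":
    assert float(estimate_pass_at_k(2, [1], 1)[0]) == 0.5
    assert float(estimate_pass_at_k(2, [1], 2)[0]) == 1.0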
| 88
|
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_mctct": ["MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP", "MCTCTConfig"],
    "feature_extraction_mctct": ["MCTCTFeatureExtractor"],
    "processing_mctct": ["MCTCTProcessor"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mctct"] = [
        "MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MCTCTForCTC",
        "MCTCTModel",
        "MCTCTPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_mctct import MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP, MCTCTConfig
    from .feature_extraction_mctct import MCTCTFeatureExtractor
    from .processing_mctct import MCTCTProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mctct import MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST, MCTCTForCTC, MCTCTModel, MCTCTPreTrainedModel

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
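
# Behavior sketch (illustrative; the exact import path depends on the
# transformers version, since MCTCT was later moved under deprecated models):
# with this pattern the package import is cheap, and _LazyModule only loads a
# submodule the first time one of its names is accessed, e.g.
#
#     from transformers import MCTCTConfig  # imports configuration_mctct only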
| 88
| 1
|
'''simple docstring'''
import unittest
from transformers import DebertaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DebertaForMaskedLM,
DebertaForQuestionAnswering,
DebertaForSequenceClassification,
DebertaForTokenClassification,
DebertaModel,
)
from transformers.models.deberta.modeling_deberta import DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST
class A ( a_ ):
def __init__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=13 , SCREAMING_SNAKE_CASE=7 , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=99 , SCREAMING_SNAKE_CASE=32 , SCREAMING_SNAKE_CASE=5 , SCREAMING_SNAKE_CASE=4 , SCREAMING_SNAKE_CASE=37 , SCREAMING_SNAKE_CASE="gelu" , SCREAMING_SNAKE_CASE=0.1 , SCREAMING_SNAKE_CASE=0.1 , SCREAMING_SNAKE_CASE=512 , SCREAMING_SNAKE_CASE=16 , SCREAMING_SNAKE_CASE=2 , SCREAMING_SNAKE_CASE=0.02 , SCREAMING_SNAKE_CASE=False , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE="None" , SCREAMING_SNAKE_CASE=3 , SCREAMING_SNAKE_CASE=4 , SCREAMING_SNAKE_CASE=None , ) -> List[Any]:
"""simple docstring"""
A : Any = parent
A : Any = batch_size
A : List[str] = seq_length
A : List[str] = is_training
A : Union[str, Any] = use_input_mask
A : Any = use_token_type_ids
A : Tuple = use_labels
A : List[str] = vocab_size
A : Tuple = hidden_size
A : str = num_hidden_layers
A : str = num_attention_heads
A : Union[str, Any] = intermediate_size
A : Union[str, Any] = hidden_act
A : str = hidden_dropout_prob
A : str = attention_probs_dropout_prob
A : str = max_position_embeddings
A : List[Any] = type_vocab_size
A : int = type_sequence_label_size
A : str = initializer_range
A : str = num_labels
A : Optional[Any] = num_choices
A : List[str] = relative_attention
A : List[Any] = position_biased_input
A : int = pos_att_type
A : Dict = scope
def __lowerCAmelCase ( self ) -> int:
"""simple docstring"""
A : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
A : Any = None
if self.use_input_mask:
A : int = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
A : List[str] = None
if self.use_token_type_ids:
A : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
A : Dict = None
A : List[Any] = None
A : str = None
if self.use_labels:
A : Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size )
A : int = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
A : Optional[int] = ids_tensor([self.batch_size] , self.num_choices )
A : Optional[int] = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def __lowerCAmelCase ( self ) -> Tuple:
"""simple docstring"""
return DebertaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , relative_attention=self.relative_attention , position_biased_input=self.position_biased_input , pos_att_type=self.pos_att_type , )
def __lowerCAmelCase ( self ) -> Any:
"""simple docstring"""
A : List[str] = self.get_config()
A : str = 300
return config
def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE ) -> Union[str, Any]:
"""simple docstring"""
self.parent.assertListEqual(list(result.loss.size() ) , [] )
def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Union[str, Any]:
"""simple docstring"""
A : List[str] = DebertaModel(config=SCREAMING_SNAKE_CASE )
model.to(SCREAMING_SNAKE_CASE )
model.eval()
A : int = model(SCREAMING_SNAKE_CASE , attention_mask=SCREAMING_SNAKE_CASE , token_type_ids=SCREAMING_SNAKE_CASE )[0]
A : List[Any] = model(SCREAMING_SNAKE_CASE , token_type_ids=SCREAMING_SNAKE_CASE )[0]
A : str = model(SCREAMING_SNAKE_CASE )[0]
self.parent.assertListEqual(list(sequence_output.size() ) , [self.batch_size, self.seq_length, self.hidden_size] )
def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> str:
"""simple docstring"""
A : str = DebertaForMaskedLM(config=SCREAMING_SNAKE_CASE )
model.to(SCREAMING_SNAKE_CASE )
model.eval()
A : List[str] = model(SCREAMING_SNAKE_CASE , attention_mask=SCREAMING_SNAKE_CASE , token_type_ids=SCREAMING_SNAKE_CASE , labels=SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Dict:
"""simple docstring"""
A : List[str] = self.num_labels
A : List[str] = DebertaForSequenceClassification(SCREAMING_SNAKE_CASE )
model.to(SCREAMING_SNAKE_CASE )
model.eval()
A : List[str] = model(SCREAMING_SNAKE_CASE , attention_mask=SCREAMING_SNAKE_CASE , token_type_ids=SCREAMING_SNAKE_CASE , labels=SCREAMING_SNAKE_CASE )
self.parent.assertListEqual(list(result.logits.size() ) , [self.batch_size, self.num_labels] )
self.check_loss_output(SCREAMING_SNAKE_CASE )
def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Optional[int]:
"""simple docstring"""
A : Dict = self.num_labels
A : str = DebertaForTokenClassification(config=SCREAMING_SNAKE_CASE )
model.to(SCREAMING_SNAKE_CASE )
model.eval()
A : Optional[int] = model(SCREAMING_SNAKE_CASE , attention_mask=SCREAMING_SNAKE_CASE , token_type_ids=SCREAMING_SNAKE_CASE , labels=SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Union[str, Any]:
"""simple docstring"""
A : Optional[int] = DebertaForQuestionAnswering(config=SCREAMING_SNAKE_CASE )
model.to(SCREAMING_SNAKE_CASE )
model.eval()
A : List[str] = model(
SCREAMING_SNAKE_CASE , attention_mask=SCREAMING_SNAKE_CASE , token_type_ids=SCREAMING_SNAKE_CASE , start_positions=SCREAMING_SNAKE_CASE , end_positions=SCREAMING_SNAKE_CASE , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class A ( a_ , a_ , unittest.TestCase ):
__magic_name__ = (
(
DebertaModel,
DebertaForMaskedLM,
DebertaForSequenceClassification,
DebertaForTokenClassification,
DebertaForQuestionAnswering,
)
if is_torch_available()
else ()
)
__magic_name__ = (
{
"feature-extraction": DebertaModel,
"fill-mask": DebertaForMaskedLM,
"question-answering": DebertaForQuestionAnswering,
"text-classification": DebertaForSequenceClassification,
"token-classification": DebertaForTokenClassification,
"zero-shot": DebertaForSequenceClassification,
}
if is_torch_available()
else {}
)
__magic_name__ = True
__magic_name__ = False
__magic_name__ = False
__magic_name__ = False
__magic_name__ = False
def __lowerCAmelCase ( self ) -> Union[str, Any]:
"""simple docstring"""
A : Union[str, Any] = DebertaModelTester(self )
A : List[Any] = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE , hidden_size=37 )
def __lowerCAmelCase ( self ) -> List[str]:
"""simple docstring"""
self.config_tester.run_common_tests()
def __lowerCAmelCase ( self ) -> Any:
"""simple docstring"""
A : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_model(*SCREAMING_SNAKE_CASE )
def __lowerCAmelCase ( self ) -> List[Any]:
"""simple docstring"""
A : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_sequence_classification(*SCREAMING_SNAKE_CASE )
def __lowerCAmelCase ( self ) -> List[str]:
"""simple docstring"""
A : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_masked_lm(*SCREAMING_SNAKE_CASE )
def __lowerCAmelCase ( self ) -> List[str]:
"""simple docstring"""
A : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_question_answering(*SCREAMING_SNAKE_CASE )
def __lowerCAmelCase ( self ) -> int:
"""simple docstring"""
A : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_token_classification(*SCREAMING_SNAKE_CASE )
@slow
def __lowerCAmelCase ( self ) -> Optional[Any]:
"""simple docstring"""
for model_name in DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A : List[Any] = DebertaModel.from_pretrained(SCREAMING_SNAKE_CASE )
self.assertIsNotNone(SCREAMING_SNAKE_CASE )
@require_torch
@require_sentencepiece
@require_tokenizers
class A ( unittest.TestCase ):
@unittest.skip(reason='''Model not available yet''' )
def __lowerCAmelCase ( self ) -> str:
"""simple docstring"""
pass
@slow
def __lowerCAmelCase ( self ) -> Any:
"""simple docstring"""
A : Union[str, Any] = DebertaModel.from_pretrained('''microsoft/deberta-base''' )
A : int = torch.tensor([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]] )
A : Optional[int] = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
with torch.no_grad():
A : Dict = model(SCREAMING_SNAKE_CASE , attention_mask=SCREAMING_SNAKE_CASE )[0]
# compare the actual values for a slice.
A : Any = torch.tensor(
[[[-0.5_986, -0.8_055, -0.8_462], [1.4_484, -0.9_348, -0.8_059], [0.3_123, 0.0_032, -1.4_131]]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , SCREAMING_SNAKE_CASE , atol=1e-4 ) , F'{output[:, 1:4, 1:4]}' )
| 350
|
'''simple docstring'''
import itertools
from dataclasses import dataclass
from typing import Any, Callable, Dict, List, Optional, Union
import pandas as pd
import pyarrow as pa
import datasets
import datasets.config
from datasets.features.features import require_storage_cast
from datasets.table import table_cast
from datasets.utils.py_utils import Literal
logger = datasets.utils.logging.get_logger(__name__)

_PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS = ["names", "prefix"]
_PANDAS_READ_CSV_DEPRECATED_PARAMETERS = ["warn_bad_lines", "error_bad_lines", "mangle_dupe_cols"]
_PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS = ["encoding_errors", "on_bad_lines"]
_PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS = ["date_format"]
@dataclass
class CsvConfig(datasets.BuilderConfig):
    """BuilderConfig for CSV."""

    sep: str = ","
    delimiter: Optional[str] = None
    header: Optional[Union[int, List[int], str]] = "infer"
    names: Optional[List[str]] = None
    column_names: Optional[List[str]] = None
    index_col: Optional[Union[int, str, List[int], List[str]]] = None
    usecols: Optional[Union[List[int], List[str]]] = None
    prefix: Optional[str] = None
    mangle_dupe_cols: bool = True
    engine: Optional[Literal["c", "python", "pyarrow"]] = None
    converters: Dict[Union[int, str], Callable[[Any], Any]] = None
    true_values: Optional[list] = None
    false_values: Optional[list] = None
    skipinitialspace: bool = False
    skiprows: Optional[Union[int, List[int]]] = None
    nrows: Optional[int] = None
    na_values: Optional[Union[str, List[str]]] = None
    keep_default_na: bool = True
    na_filter: bool = True
    verbose: bool = False
    skip_blank_lines: bool = True
    thousands: Optional[str] = None
    decimal: str = "."
    lineterminator: Optional[str] = None
    quotechar: str = '"'
    quoting: int = 0
    escapechar: Optional[str] = None
    comment: Optional[str] = None
    encoding: Optional[str] = None
    dialect: Optional[str] = None
    error_bad_lines: bool = True
    warn_bad_lines: bool = True
    skipfooter: int = 0
    doublequote: bool = True
    memory_map: bool = False
    float_precision: Optional[str] = None
    chunksize: int = 10_000
    features: Optional[datasets.Features] = None
    encoding_errors: Optional[str] = "strict"
    on_bad_lines: Literal["error", "warn", "skip"] = "error"
    date_format: Optional[str] = None
    def __post_init__(self):
        if self.delimiter is not None:
            self.sep = self.delimiter
        if self.column_names is not None:
            self.names = self.column_names
@property
    def pd_read_csv_kwargs(self):
        pd_read_csv_kwargs = {
'''sep''': self.sep,
'''header''': self.header,
'''names''': self.names,
'''index_col''': self.index_col,
'''usecols''': self.usecols,
'''prefix''': self.prefix,
'''mangle_dupe_cols''': self.mangle_dupe_cols,
'''engine''': self.engine,
'''converters''': self.converters,
'''true_values''': self.true_values,
'''false_values''': self.false_values,
'''skipinitialspace''': self.skipinitialspace,
'''skiprows''': self.skiprows,
'''nrows''': self.nrows,
'''na_values''': self.na_values,
'''keep_default_na''': self.keep_default_na,
'''na_filter''': self.na_filter,
'''verbose''': self.verbose,
'''skip_blank_lines''': self.skip_blank_lines,
'''thousands''': self.thousands,
'''decimal''': self.decimal,
'''lineterminator''': self.lineterminator,
'''quotechar''': self.quotechar,
'''quoting''': self.quoting,
'''escapechar''': self.escapechar,
'''comment''': self.comment,
'''encoding''': self.encoding,
'''dialect''': self.dialect,
'''error_bad_lines''': self.error_bad_lines,
'''warn_bad_lines''': self.warn_bad_lines,
'''skipfooter''': self.skipfooter,
'''doublequote''': self.doublequote,
'''memory_map''': self.memory_map,
'''float_precision''': self.float_precision,
'''chunksize''': self.chunksize,
'''encoding_errors''': self.encoding_errors,
'''on_bad_lines''': self.on_bad_lines,
'''date_format''': self.date_format,
}
# some kwargs must not be passed if they don't have a default value
# some others are deprecated and we can also not pass them if they are the default value
for pd_read_csv_parameter in _PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS + _PANDAS_READ_CSV_DEPRECATED_PARAMETERS:
if pd_read_csv_kwargs[pd_read_csv_parameter] == getattr(CsvConfig() , pd_read_csv_parameter ):
del pd_read_csv_kwargs[pd_read_csv_parameter]
# Remove 2.0 new arguments
if not (datasets.config.PANDAS_VERSION.major >= 2):
for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS:
del pd_read_csv_kwargs[pd_read_csv_parameter]
# Remove 1.3 new arguments
if not (datasets.config.PANDAS_VERSION.major >= 1 and datasets.config.PANDAS_VERSION.minor >= 3):
for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS:
del pd_read_csv_kwargs[pd_read_csv_parameter]
return pd_read_csv_kwargs
class Csv(datasets.ArrowBasedBuilder):
    BUILDER_CONFIG_CLASS = CsvConfig

    def _info(self):
        return datasets.DatasetInfo(features=self.config.features)

    def _split_generators(self, dl_manager):
        """We handle string, list and dicts in datafiles"""
        if not self.config.data_files:
            raise ValueError(f'At least one data file must be specified, but got data_files={self.config.data_files}')
        data_files = dl_manager.download_and_extract(self.config.data_files)
        if isinstance(data_files, (str, list, tuple)):
            files = data_files
            if isinstance(files, str):
                files = [files]
            files = [dl_manager.iter_files(file) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files, str):
                files = [files]
            files = [dl_manager.iter_files(file) for file in files]
            splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
        return splits

    def _cast_table(self, pa_table: pa.Table) -> pa.Table:
        if self.config.features is not None:
            schema = self.config.features.arrow_schema
            if all(not require_storage_cast(feature) for feature in self.config.features.values()):
                # cheaper cast
                pa_table = pa.Table.from_arrays([pa_table[field.name] for field in schema], schema=schema)
            else:
                # more expensive cast; allows str <-> int/float or str to Audio for example
                pa_table = table_cast(pa_table, schema)
        return pa_table

    def _generate_tables(self, files):
        schema = self.config.features.arrow_schema if self.config.features else None
        # dtype allows reading an int column as str
        dtype = (
            {
                name: dtype.to_pandas_dtype() if not require_storage_cast(feature) else object
                for name, dtype, feature in zip(schema.names, schema.types, self.config.features.values())
            }
            if schema is not None
            else None
        )
        for file_idx, file in enumerate(itertools.chain.from_iterable(files)):
            csv_file_reader = pd.read_csv(file, iterator=True, dtype=dtype, **self.config.pd_read_csv_kwargs)
            try:
                for batch_idx, df in enumerate(csv_file_reader):
                    pa_table = pa.Table.from_pandas(df)
                    # Uncomment for debugging (will print the Arrow table size and elements)
                    # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
                    # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
                    yield (file_idx, batch_idx), self._cast_table(pa_table)
            except ValueError as e:
                logger.error(f'Failed to read file \'{file}\' with error {type(e)}: {e}')
                raise
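
# Usage sketch (hypothetical file name): this builder is what backs
# load_dataset("csv", ...), so an end-to-end smoke test looks like:
#
#     from datasets import load_dataset
#     ds = load_dataset("csv", data_files="my_file.csv", split="train")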
| 311
| 0
|
'''simple docstring'''
def is_arithmetic_series(series: list) -> bool:
    """Checks whether a list forms an arithmetic series (constant difference)."""
    if not isinstance(series, list):
        raise ValueError("Input series is not valid, valid series - [2, 4, 6]")
    if len(series) == 0:
        raise ValueError("Input list must be a non empty list")
    if len(series) == 1:
        return True
    common_diff = series[1] - series[0]
    for index in range(len(series) - 1):
        if series[index + 1] - series[index] != common_diff:
            return False
    return True


def arithmetic_mean(series: list) -> float:
    """Returns the arithmetic mean of a list of numbers."""
    if not isinstance(series, list):
        raise ValueError("Input series is not valid, valid series - [2, 4, 6]")
    if len(series) == 0:
        raise ValueError("Input list must be a non empty list")
    answer = 0
    for val in series:
        answer += val
    return answer / len(series)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
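    # Usage sketch (example values chosen for illustration): a constant step
    # of 2 makes [2, 4, 6] an arithmetic series, and its mean is 12 / 3 = 4.0.
    print(is_arithmetic_series([2, 4, 6]))  # True
    print(arithmetic_mean([2, 4, 6]))  # 4.0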
| 75
|
from __future__ import annotations
def solve_maze(maze: list[list[int]]) -> bool:
    """Print a path through `maze` (0 = open, 1 = blocked) if one exists."""
    size = len(maze)
    # We need to create solution object to save path.
    solutions = [[0 for _ in range(size)] for _ in range(size)]
    solved = run_maze(maze, 0, 0, solutions)
    if solved:
        print("\n".join(str(row) for row in solutions))
    else:
        print("No solution exists!")
    return solved


def run_maze(maze: list[list[int]], i: int, j: int, solutions: list[list[int]]) -> bool:
    """Recursive backtracking step: try to extend the path from cell (i, j)."""
    size = len(maze)
    # Final check point.
    if i == j == (size - 1):
        solutions[i][j] = 1
        return True

    lower_flag = (not i < 0) and (not j < 0)  # Check lower bounds
    upper_flag = (i < size) and (j < size)  # Check upper bounds

    if lower_flag and upper_flag:
        # check for already visited and block points.
        block_flag = (not solutions[i][j]) and (not maze[i][j])
        if block_flag:
            # mark as visited
            solutions[i][j] = 1

            # check for directions
            if (
                run_maze(maze, i + 1, j, solutions)
                or run_maze(maze, i, j + 1, solutions)
                or run_maze(maze, i - 1, j, solutions)
                or run_maze(maze, i, j - 1, solutions)
            ):
                return True

            solutions[i][j] = 0
            return False
    return False
if __name__ == "__main__":
import doctest
doctest.testmod()
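    # Usage sketch: a small grid where 0 = open cell and 1 = wall.
    example_maze = [
        [0, 1, 0],
        [0, 0, 0],
        [1, 0, 0],
    ]
    solve_maze(example_maze)  # prints the 0/1 path matrix and returns True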
"""simple docstring"""
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version(""">=""", """4.25.0""")):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline
else:
from .pipeline_unclip import UnCLIPPipeline
from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline
from .text_proj import UnCLIPTextProjModel
"""simple docstring"""
import string
# frequency taken from https://en.wikipedia.org/wiki/Letter_frequency
english_letter_freq = {
    "E": 12.70,
    "T": 9.06,
    "A": 8.17,
    "O": 7.51,
    "I": 6.97,
    "N": 6.75,
    "S": 6.33,
    "H": 6.09,
    "R": 5.99,
    "D": 4.25,
    "L": 4.03,
    "C": 2.78,
    "U": 2.76,
    "M": 2.41,
    "W": 2.36,
    "F": 2.23,
    "G": 2.02,
    "Y": 1.97,
    "P": 1.93,
    "B": 1.29,
    "V": 0.98,
    "K": 0.77,
    "J": 0.15,
    "X": 0.15,
    "Q": 0.10,
    "Z": 0.07,
}
ETAOIN = "ETAOINSHRDLCUMWFGYPBVKJXQZ"
LETTERS = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"


def get_letter_count(message: str) -> dict[str, int]:
    """Count occurrences of each uppercase letter in `message`."""
    letter_count = {letter: 0 for letter in string.ascii_uppercase}
    for letter in message.upper():
        if letter in LETTERS:
            letter_count[letter] += 1
    return letter_count


def get_item_at_index_zero(x: tuple) -> str:
    return x[0]


def get_frequency_order(message: str) -> str:
    """Return the letters of the alphabet ordered by their frequency in `message`."""
    letter_to_freq = get_letter_count(message)
    freq_to_letter: dict[int, list[str]] = {freq: [] for letter, freq in letter_to_freq.items()}
    for letter in LETTERS:
        freq_to_letter[letter_to_freq[letter]].append(letter)

    freq_to_letter_str: dict[int, str] = {}
    for freq in freq_to_letter:
        freq_to_letter[freq].sort(key=ETAOIN.find, reverse=True)
        freq_to_letter_str[freq] = "".join(freq_to_letter[freq])

    freq_pairs = list(freq_to_letter_str.items())
    freq_pairs.sort(key=get_item_at_index_zero, reverse=True)
    freq_order: list[str] = [freq_pair[1] for freq_pair in freq_pairs]
    return "".join(freq_order)


def english_freq_match_score(message: str) -> int:
    """Score (0-12) how closely `message`'s letter-frequency order matches English."""
    freq_order = get_frequency_order(message)
    match_score = 0
    for common_letter in ETAOIN[:6]:
        if common_letter in freq_order[:6]:
            match_score += 1
    for uncommon_letter in ETAOIN[-6:]:
        if uncommon_letter in freq_order[-6:]:
            match_score += 1
    return match_score
if __name__ == "__main__":
import doctest
doctest.testmod()
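    # Usage sketch: plain English text tends to score near the maximum of 12.
    sample = "The quick brown fox jumps over the lazy dog."
    print(get_frequency_order(sample))
    print(english_freq_match_score(sample))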
"""simple docstring"""
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, Encoder, VectorQuantizer
@dataclass
class VQEncoderOutput(BaseOutput):
    """Output of VQModel.encode, holding the (not yet quantized) latents."""

    latents: torch.FloatTensor


class VQModel(ModelMixin, ConfigMixin):
    @register_to_config
    def __init__(
        self,
        in_channels: int = 3,
        out_channels: int = 3,
        down_block_types: Tuple[str] = ("DownEncoderBlock2D",),
        up_block_types: Tuple[str] = ("UpDecoderBlock2D",),
        block_out_channels: Tuple[int] = (64,),
        layers_per_block: int = 1,
        act_fn: str = "silu",
        latent_channels: int = 3,
        sample_size: int = 32,
        num_vq_embeddings: int = 256,
        norm_num_groups: int = 32,
        vq_embed_dim: Optional[int] = None,
        scaling_factor: float = 0.18_215,
        norm_type: str = "group",
    ):
        super().__init__()

        # pass init params to Encoder
        self.encoder = Encoder(
            in_channels=in_channels,
            out_channels=latent_channels,
            down_block_types=down_block_types,
            block_out_channels=block_out_channels,
            layers_per_block=layers_per_block,
            act_fn=act_fn,
            norm_num_groups=norm_num_groups,
            double_z=False,
        )

        vq_embed_dim = vq_embed_dim if vq_embed_dim is not None else latent_channels
        self.quant_conv = nn.Conv2d(latent_channels, vq_embed_dim, 1)
        self.quantize = VectorQuantizer(num_vq_embeddings, vq_embed_dim, beta=0.25, remap=None, sane_index_shape=False)
        self.post_quant_conv = nn.Conv2d(vq_embed_dim, latent_channels, 1)

        # pass init params to Decoder
        self.decoder = Decoder(
            in_channels=latent_channels,
            out_channels=out_channels,
            up_block_types=up_block_types,
            block_out_channels=block_out_channels,
            layers_per_block=layers_per_block,
            act_fn=act_fn,
            norm_num_groups=norm_num_groups,
            norm_type=norm_type,
        )

    @apply_forward_hook
    def encode(self, x: torch.FloatTensor, return_dict: bool = True):
        h = self.encoder(x)
        h = self.quant_conv(h)
        if not return_dict:
            return (h,)
        return VQEncoderOutput(latents=h)

    @apply_forward_hook
    def decode(self, h: torch.FloatTensor, force_not_quantize: bool = False, return_dict: bool = True):
        # also go through quantization layer
        if not force_not_quantize:
            quant, emb_loss, info = self.quantize(h)
        else:
            quant = h
        quant2 = self.post_quant_conv(quant)
        dec = self.decoder(quant2, quant if self.config.norm_type == "spatial" else None)
        if not return_dict:
            return (dec,)
        return DecoderOutput(sample=dec)

    def forward(self, sample: torch.FloatTensor, return_dict: bool = True):
        x = sample
        h = self.encode(x).latents
        dec = self.decode(h).sample
        if not return_dict:
            return (dec,)
        return DecoderOutput(sample=dec)
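
# Usage sketch: round-trip a random batch through the default VQ autoencoder.
# With the default single down/up block the spatial size is preserved.
if __name__ == "__main__":
    model = VQModel()
    images = torch.randn(1, 3, 32, 32)
    reconstruction = model(images).sample
    print(reconstruction.shape)  # expected: torch.Size([1, 3, 32, 32])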
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/canine-s": "https://huggingface.co/google/canine-s/resolve/main/config.json",
    # See all CANINE models at https://huggingface.co/models?filter=canine
}


class CanineConfig(PretrainedConfig):
    model_type = "canine"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=16384,
        type_vocab_size=16,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        bos_token_id=0xE000,
        eos_token_id=0xE001,
        downsampling_rate=4,
        upsampling_kernel_size=4,
        num_hash_functions=8,
        num_hash_buckets=16384,
        local_transformer_stride=128,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        # Character config:
        self.downsampling_rate = downsampling_rate
        self.upsampling_kernel_size = upsampling_kernel_size
        self.num_hash_functions = num_hash_functions
        self.num_hash_buckets = num_hash_buckets
        self.local_transformer_stride = local_transformer_stride
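
# Usage sketch: the default CANINE configuration; 0xE000/0xE001 are the special
# codepoints CANINE reserves for BOS/EOS.
if __name__ == "__main__":
    config = CanineConfig()
    print(config.hidden_size, config.num_hash_buckets)  # 768 16384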
"""simple docstring"""
from math import ceil
def assert_device_map(device_map, num_blocks):
    """Validate that `device_map` covers blocks 0..num_blocks-1 exactly once."""
    blocks = list(range(0, num_blocks))
    device_map_blocks = [item for sublist in list(device_map.values()) for item in sublist]

    # Duplicate check
    duplicate_blocks = []
    for i in device_map_blocks:
        if device_map_blocks.count(i) > 1 and i not in duplicate_blocks:
            duplicate_blocks.append(i)
    # Missing blocks
    missing_blocks = [i for i in blocks if i not in device_map_blocks]
    extra_blocks = [i for i in device_map_blocks if i not in blocks]

    if len(duplicate_blocks) != 0:
        raise ValueError(
            "Duplicate attention blocks specified in device_map. Attention blocks must be specified to one device."
            " These attention blocks were specified more than once: " + str(duplicate_blocks)
        )
    if len(missing_blocks) != 0:
        raise ValueError(
            "There are attention blocks for this model that are not specified in the device_map. Add these attention "
            "blocks to a device on the device_map: " + str(missing_blocks)
        )
    if len(extra_blocks) != 0:
        raise ValueError(
            "The device_map contains more attention blocks than this model has. Remove these from the device_map:"
            + str(extra_blocks)
        )


def get_device_map(n_layers, devices):
    """Return a dict evenly distributing `n_layers` layer indices over `devices`."""
    layers = list(range(n_layers))
    n_blocks = int(ceil(n_layers / len(devices)))
    layers_list = [layers[i : i + n_blocks] for i in range(0, n_layers, n_blocks)]
    return dict(zip(devices, layers_list))
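
# Usage sketch: spread 12 layers over two devices, then validate the result.
if __name__ == "__main__":
    device_map = get_device_map(n_layers=12, devices=[0, 1])
    print(device_map)  # {0: [0, 1, 2, 3, 4, 5], 1: [6, 7, 8, 9, 10, 11]}
    assert_device_map(device_map, num_blocks=12)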
"""simple docstring"""
import comet # From: unbabel-comet
import torch
import datasets
logger = datasets.logging.get_logger(__name__)

_CITATION = "\\n@inproceedings{rei-EtAl:2020:WMT,\n author = {Rei, Ricardo and Stewart, Craig and Farinha, Ana C and Lavie, Alon},\n title = {Unbabel's Participation in the WMT20 Metrics Shared Task},\n booktitle = {Proceedings of the Fifth Conference on Machine Translation},\n month = {November},\n year = {2020},\n address = {Online},\n publisher = {Association for Computational Linguistics},\n pages = {909--918},\n}\n@inproceedings{rei-etal-2020-comet,\n title = \"{COMET}: A Neural Framework for {MT} Evaluation\",\n author = \"Rei, Ricardo and\n Stewart, Craig and\n Farinha, Ana C and\n Lavie, Alon\",\n booktitle = \"Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP)\",\n month = nov,\n year = \"2020\",\n address = \"Online\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://www.aclweb.org/anthology/2020.emnlp-main.213\",\n pages = \"2685--2702\",\n}\n"

_DESCRIPTION = "\\nCrosslingual Optimized Metric for Evaluation of Translation (COMET) is an open-source framework used to train Machine Translation metrics that achieve high levels of correlation with different types of human judgments (HTER, DA's or MQM).\nWith the release of the framework the authors also released fully trained models that were used to compete in the WMT20 Metrics Shared Task achieving SOTA in that years competition.\n\nSee the [README.md] file at https://unbabel.github.io/COMET/html/models.html for more information.\n"

_KWARGS_DESCRIPTION = "\nCOMET score.\n\nArgs:\n\n`sources` (list of str): Source sentences\n`predictions` (list of str): candidate translations\n`references` (list of str): reference translations\n`cuda` (bool): If set to True, runs COMET using GPU\n`show_progress` (bool): Shows progress\n`model`: COMET model to be used. Will default to `wmt-large-da-estimator-1719` if None.\n\nReturns:\n `samples`: List of dictionaries with `src`, `mt`, `ref` and `score`.\n `scores`: List of scores.\n\nExamples:\n\n >>> comet_metric = datasets.load_metric('comet')\n >>> # comet_metric = load_metric('comet', 'wmt20-comet-da') # you can also choose which model to use\n >>> source = [\"Dem Feuer konnte Einhalt geboten werden\", \"Schulen und Kindergärten wurden eröffnet.\"]\n >>> hypothesis = [\"The fire could be stopped\", \"Schools and kindergartens were open\"]\n >>> reference = [\"They were able to control the fire.\", \"Schools and kindergartens opened\"]\n >>> results = comet_metric.compute(predictions=hypothesis, references=reference, sources=source)\n >>> print([round(v, 2) for v in results[\"scores\"]])\n [0.19, 0.92]\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION )
class COMET(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            homepage="https://unbabel.github.io/COMET/html/index.html",
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "sources": datasets.Value("string", id="sequence"),
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            codebase_urls=["https://github.com/Unbabel/COMET"],
            reference_urls=[
                "https://github.com/Unbabel/COMET",
                "https://www.aclweb.org/anthology/2020.emnlp-main.213/",
                "http://www.statmt.org/wmt20/pdf/2020.wmt-1.101.pdf6",
            ],
        )

    def _download_and_prepare(self, dl_manager):
        if self.config_name == "default":
            self.scorer = comet.load_from_checkpoint(comet.download_model("wmt20-comet-da"))
        else:
            self.scorer = comet.load_from_checkpoint(comet.download_model(self.config_name))

    def _compute(self, sources, predictions, references, gpus=None, progress_bar=False):
        if gpus is None:
            gpus = 1 if torch.cuda.is_available() else 0
        data = {"src": sources, "mt": predictions, "ref": references}
        data = [dict(zip(data, t)) for t in zip(*data.values())]
        scores, mean_score = self.scorer.predict(data, gpus=gpus, progress_bar=progress_bar)
        return {"mean_score": mean_score, "scores": scores}
import argparse
import copy
def generate_neighbours(path):
    """Parse the input file into an adjacency dict: node -> [[neighbour, distance], ...]."""
    dict_of_neighbours = {}

    with open(path) as f:
        for line in f:
            if line.split()[0] not in dict_of_neighbours:
                _list = []
                _list.append([line.split()[1], line.split()[2]])
                dict_of_neighbours[line.split()[0]] = _list
            else:
                dict_of_neighbours[line.split()[0]].append(
                    [line.split()[1], line.split()[2]]
                )
            if line.split()[1] not in dict_of_neighbours:
                _list = []
                _list.append([line.split()[0], line.split()[2]])
                dict_of_neighbours[line.split()[1]] = _list
            else:
                dict_of_neighbours[line.split()[1]].append(
                    [line.split()[0], line.split()[2]]
                )

    return dict_of_neighbours


def generate_first_solution(path, dict_of_neighbours):
    """Build a greedy nearest-neighbour tour starting from the first node in the file."""
    with open(path) as f:
        start_node = f.read(1)
    end_node = start_node

    first_solution = []
    visiting = start_node

    distance_of_first_solution = 0
    while visiting not in first_solution:
        minim = 10_000
        for k in dict_of_neighbours[visiting]:
            if int(k[1]) < int(minim) and k[0] not in first_solution:
                minim = k[1]
                best_node = k[0]

        first_solution.append(visiting)
        distance_of_first_solution = distance_of_first_solution + int(minim)
        visiting = best_node

    first_solution.append(end_node)

    position = 0
    for k in dict_of_neighbours[first_solution[-2]]:
        if k[0] == start_node:
            break
        position += 1

    distance_of_first_solution = (
        distance_of_first_solution
        + int(dict_of_neighbours[first_solution[-2]][position][1])
        - 10_000
    )
    return first_solution, distance_of_first_solution


def find_neighborhood(solution, dict_of_neighbours):
    """Return all 2-swap neighbours of `solution`, each with its total distance appended."""
    neighborhood_of_solution = []

    for n in solution[1:-1]:
        idx1 = solution.index(n)
        for kn in solution[1:-1]:
            idx2 = solution.index(kn)
            if n == kn:
                continue

            _tmp = copy.deepcopy(solution)
            _tmp[idx1] = kn
            _tmp[idx2] = n

            distance = 0
            for k in _tmp[:-1]:
                next_node = _tmp[_tmp.index(k) + 1]
                for i in dict_of_neighbours[k]:
                    if i[0] == next_node:
                        distance = distance + int(i[1])
            _tmp.append(distance)

            if _tmp not in neighborhood_of_solution:
                neighborhood_of_solution.append(_tmp)

    index_of_last_item_in_the_list = len(neighborhood_of_solution[0]) - 1

    neighborhood_of_solution.sort(key=lambda x: x[index_of_last_item_in_the_list])
    return neighborhood_of_solution


def tabu_search(first_solution, distance_of_first_solution, dict_of_neighbours, iters, size):
    """Repeatedly move to the best non-tabu neighbour, keeping the best tour ever seen."""
    count = 1
    solution = first_solution
    tabu_list = []
    best_cost = distance_of_first_solution
    best_solution_ever = solution

    while count <= iters:
        neighborhood = find_neighborhood(solution, dict_of_neighbours)
        index_of_best_solution = 0
        best_solution = neighborhood[index_of_best_solution]
        best_cost_index = len(best_solution) - 1

        found = False
        while not found:
            i = 0
            while i < len(best_solution):
                if best_solution[i] != solution[i]:
                    first_exchange_node = best_solution[i]
                    second_exchange_node = solution[i]
                    break
                i = i + 1

            if [first_exchange_node, second_exchange_node] not in tabu_list and [
                second_exchange_node,
                first_exchange_node,
            ] not in tabu_list:
                tabu_list.append([first_exchange_node, second_exchange_node])
                found = True
                solution = best_solution[:-1]
                cost = neighborhood[index_of_best_solution][best_cost_index]
                if cost < best_cost:
                    best_cost = cost
                    best_solution_ever = solution
            else:
                index_of_best_solution = index_of_best_solution + 1
                best_solution = neighborhood[index_of_best_solution]

        if len(tabu_list) >= size:
            tabu_list.pop(0)

        count = count + 1

    return best_solution_ever, best_cost


def main(args=None):
    dict_of_neighbours = generate_neighbours(args.File)

    first_solution, distance_of_first_solution = generate_first_solution(
        args.File, dict_of_neighbours
    )

    best_sol, best_cost = tabu_search(
        first_solution,
        distance_of_first_solution,
        dict_of_neighbours,
        args.Iterations,
        args.Size,
    )

    print(f"Best solution: {best_sol}, with total distance: {best_cost}.")
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Tabu Search")
parser.add_argument(
"-f",
"--File",
type=str,
help="Path to the file containing the data",
required=True,
)
parser.add_argument(
"-i",
"--Iterations",
type=int,
help="How many iterations the algorithm should perform",
required=True,
)
parser.add_argument(
"-s", "--Size", type=int, help="Size of the tabu list", required=True
)
# Pass the arguments to main method
main(parser.parse_args())
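
# Input sketch (hypothetical file name and contents): each line of the data file
# describes one weighted edge as "node_a node_b distance", e.g.
#   a b 20
#   a c 18
# Run: python tabu_search.py -f tsp_data.txt -i 100 -s 5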
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/wav2vec2-base-960h": "https://huggingface.co/facebook/wav2vec2-base-960h/resolve/main/config.json",
    # See all Wav2Vec2 models at https://huggingface.co/models?filter=wav2vec2
}


class Wav2Vec2Config(PretrainedConfig):
    model_type = "wav2vec2"

    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        feat_quantizer_dropout=0.0,
        final_dropout=0.1,
        layerdrop=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        feat_extract_norm="group",
        feat_extract_activation="gelu",
        conv_dim=(512, 512, 512, 512, 512, 512, 512),
        conv_stride=(5, 2, 2, 2, 2, 2, 2),
        conv_kernel=(10, 3, 3, 3, 3, 2, 2),
        conv_bias=False,
        num_conv_pos_embeddings=128,
        num_conv_pos_embedding_groups=16,
        do_stable_layer_norm=False,
        apply_spec_augment=True,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        num_codevectors_per_group=320,
        num_codevector_groups=2,
        contrastive_logits_temperature=0.1,
        num_negatives=100,
        codevector_dim=256,
        proj_codevector_dim=256,
        diversity_loss_weight=0.1,
        ctc_loss_reduction="sum",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        tdnn_dim=(512, 512, 512, 512, 1500),
        tdnn_kernel=(5, 3, 3, 1, 1),
        tdnn_dilation=(1, 2, 3, 1, 1),
        xvector_output_dim=512,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        add_adapter=False,
        adapter_kernel_size=3,
        adapter_stride=2,
        num_adapter_layers=3,
        output_hidden_size=None,
        adapter_attn_dim=None,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size
        self.do_stable_layer_norm = do_stable_layer_norm
        self.use_weighted_layer_sum = use_weighted_layer_sum

        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
                " `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
                f" {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,"
                f" `len(config.conv_kernel) = {len(self.conv_kernel)}`."
            )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        # parameters for pretraining with codevector quantized representations
        self.num_codevectors_per_group = num_codevectors_per_group
        self.num_codevector_groups = num_codevector_groups
        self.contrastive_logits_temperature = contrastive_logits_temperature
        self.feat_quantizer_dropout = feat_quantizer_dropout
        self.num_negatives = num_negatives
        self.codevector_dim = codevector_dim
        self.proj_codevector_dim = proj_codevector_dim
        self.diversity_loss_weight = diversity_loss_weight

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # adapter
        self.add_adapter = add_adapter
        self.adapter_kernel_size = adapter_kernel_size
        self.adapter_stride = adapter_stride
        self.num_adapter_layers = num_adapter_layers
        self.output_hidden_size = output_hidden_size or hidden_size
        self.adapter_attn_dim = adapter_attn_dim

        # SequenceClassification-specific parameter. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size

        # XVector-specific parameters. Feel free to ignore for other classes.
        self.tdnn_dim = list(tdnn_dim)
        self.tdnn_kernel = list(tdnn_kernel)
        self.tdnn_dilation = list(tdnn_dilation)
        self.xvector_output_dim = xvector_output_dim

    @property
    def inputs_to_logits_ratio(self):
        return functools.reduce(operator.mul, self.conv_stride, 1)
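
# Usage sketch: the feature extractor's total stride is the product of
# conv_stride, exposed by the property above.
if __name__ == "__main__":
    config = Wav2Vec2Config()
    print(config.inputs_to_logits_ratio)  # 320 (= 5 * 2**6)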
from dataclasses import dataclass, field
from typing import Tuple
from ..utils import cached_property, is_tf_available, logging, requires_backends
from .benchmark_args_utils import BenchmarkArguments
if is_tf_available():
import tensorflow as tf
logger = logging.get_logger(__name__)


@dataclass
class TensorFlowBenchmarkArguments(BenchmarkArguments):
    deprecated_args = [
        "no_inference",
        "no_cuda",
        "no_tpu",
        "no_speed",
        "no_memory",
        "no_env_print",
        "no_multi_process",
    ]

    def __init__(self, **kwargs):
        """Sanitize deprecated `no_*` arguments, then defer to the base class."""
        for deprecated_arg in self.deprecated_args:
            if deprecated_arg in kwargs:
                positive_arg = deprecated_arg[3:]
                kwargs[positive_arg] = not kwargs.pop(deprecated_arg)
                logger.warning(
                    f"{deprecated_arg} is depreciated. Please use --no-{positive_arg} or"
                    f" {positive_arg}={kwargs[positive_arg]}"
                )

        self.tpu_name = kwargs.pop("tpu_name", self.tpu_name)
        self.device_idx = kwargs.pop("device_idx", self.device_idx)
        self.eager_mode = kwargs.pop("eager_mode", self.eager_mode)
        self.use_xla = kwargs.pop("use_xla", self.use_xla)
        super().__init__(**kwargs)

    tpu_name: str = field(
        default=None,
        metadata={"help": "Name of TPU"},
    )
    device_idx: int = field(
        default=0,
        metadata={"help": "CPU / GPU device index. Defaults to 0."},
    )
    eager_mode: bool = field(default=False, metadata={"help": "Benchmark models in eager model."})
    use_xla: bool = field(
        default=False,
        metadata={
            "help": "Benchmark models using XLA JIT compilation. Note that `eager_model` has to be set to `False`."
        },
    )

    @cached_property
    def _setup_tpu(self) -> Tuple["tf.distribute.cluster_resolver.TPUClusterResolver"]:
        requires_backends(self, ["tf"])
        tpu = None
        if self.tpu:
            try:
                if self.tpu_name:
                    tpu = tf.distribute.cluster_resolver.TPUClusterResolver(self.tpu_name)
                else:
                    tpu = tf.distribute.cluster_resolver.TPUClusterResolver()
            except ValueError:
                tpu = None
        return tpu

    @cached_property
    def _setup_strategy(self) -> Tuple["tf.distribute.Strategy", "tf.distribute.cluster_resolver.TPUClusterResolver"]:
        requires_backends(self, ["tf"])
        if self.is_tpu:
            tf.config.experimental_connect_to_cluster(self._setup_tpu)
            tf.tpu.experimental.initialize_tpu_system(self._setup_tpu)
            strategy = tf.distribute.TPUStrategy(self._setup_tpu)
        else:
            # currently no multi gpu is allowed
            if self.is_gpu:
                # TODO: Currently only single GPU is supported
                tf.config.set_visible_devices(self.gpu_list[self.device_idx], "GPU")
                strategy = tf.distribute.OneDeviceStrategy(device=f"/gpu:{self.device_idx}")
            else:
                tf.config.set_visible_devices([], "GPU")  # disable GPU
                strategy = tf.distribute.OneDeviceStrategy(device=f"/cpu:{self.device_idx}")
        return strategy

    @property
    def is_tpu(self) -> bool:
        requires_backends(self, ["tf"])
        return self._setup_tpu is not None

    @property
    def strategy(self) -> "tf.distribute.Strategy":
        requires_backends(self, ["tf"])
        return self._setup_strategy

    @property
    def gpu_list(self):
        requires_backends(self, ["tf"])
        return tf.config.list_physical_devices("GPU")

    @property
    def n_gpu(self) -> int:
        requires_backends(self, ["tf"])
        if self.cuda:
            return len(self.gpu_list)
        return 0

    @property
    def is_gpu(self) -> bool:
        return self.n_gpu > 0
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_herbert import HerbertTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "allegro/herbert-base-cased": "https://huggingface.co/allegro/herbert-base-cased/resolve/main/vocab.json"
    },
    "merges_file": {
        "allegro/herbert-base-cased": "https://huggingface.co/allegro/herbert-base-cased/resolve/main/merges.txt"
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"allegro/herbert-base-cased": 514}
PRETRAINED_INIT_CONFIGURATION = {}


class HerbertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = HerbertTokenizer

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        sep_token="</s>",
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            sep_token=sep_token,
            **kwargs,
        )

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
'''simple docstring'''
import inspect
from typing import Callable, List, Optional, Union
import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
from diffusers import DiffusionPipeline
from diffusers.models import AutoencoderKL, UNet2DConditionModel
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from diffusers.utils import logging
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
class SeedResizeStableDiffusionPipeline(DiffusionPipeline):
    def __init__(
        self,
        vae: AutoencoderKL,
        text_encoder: CLIPTextModel,
        tokenizer: CLIPTokenizer,
        unet: UNet2DConditionModel,
        scheduler: Union[DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler],
        safety_checker: StableDiffusionSafetyChecker,
        feature_extractor: CLIPImageProcessor,
    ):
        super().__init__()
        self.register_modules(
            vae=vae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            unet=unet,
            scheduler=scheduler,
            safety_checker=safety_checker,
            feature_extractor=feature_extractor,
        )

    def enable_attention_slicing(self, slice_size: Optional[Union[str, int]] = "auto"):
        if slice_size == "auto":
            # half the attention head size is usually a good trade-off between
            # speed and memory
            slice_size = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(slice_size)

    def disable_attention_slicing(self):
        self.enable_attention_slicing(None)

    @torch.no_grad()
    def __call__(
        self,
        prompt: Union[str, List[str]],
        height: int = 512,
        width: int = 512,
        num_inference_steps: int = 50,
        guidance_scale: float = 7.5,
        negative_prompt: Optional[Union[str, List[str]]] = None,
        num_images_per_prompt: Optional[int] = 1,
        eta: float = 0.0,
        generator: Optional[torch.Generator] = None,
        latents: Optional[torch.FloatTensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
        callback_steps: int = 1,
        text_embeddings: Optional[torch.FloatTensor] = None,
        **kwargs,
    ):
        if isinstance(prompt, str):
            batch_size = 1
        elif isinstance(prompt, list):
            batch_size = len(prompt)
        else:
            raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")

        if height % 8 != 0 or width % 8 != 0:
            raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")

        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
        ):
            raise ValueError(
                f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
                f" {type(callback_steps)}."
            )

        # get prompt text embeddings
        text_inputs = self.tokenizer(
            prompt,
            padding="max_length",
            max_length=self.tokenizer.model_max_length,
            return_tensors="pt",
        )
        text_input_ids = text_inputs.input_ids

        if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
            removed_text = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :])
            logger.warning(
                "The following part of your input was truncated because CLIP can only handle sequences up to"
                f" {self.tokenizer.model_max_length} tokens: {removed_text}"
            )
            text_input_ids = text_input_ids[:, : self.tokenizer.model_max_length]

        if text_embeddings is None:
            text_embeddings = self.text_encoder(text_input_ids.to(self.device))[0]

        # duplicate text embeddings for each generation per prompt, using mps friendly method
        bs_embed, seq_len, _ = text_embeddings.shape
        text_embeddings = text_embeddings.repeat(1, num_images_per_prompt, 1)
        text_embeddings = text_embeddings.view(bs_embed * num_images_per_prompt, seq_len, -1)

        # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
        # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
        # corresponds to doing no classifier free guidance.
        do_classifier_free_guidance = guidance_scale > 1.0
        # get unconditional embeddings for classifier free guidance
        if do_classifier_free_guidance:
            uncond_tokens: List[str]
            if negative_prompt is None:
                uncond_tokens = [""]
            elif type(prompt) is not type(negative_prompt):
                raise TypeError(
                    f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
                    f" {type(prompt)}."
                )
            elif isinstance(negative_prompt, str):
                uncond_tokens = [negative_prompt]
            elif batch_size != len(negative_prompt):
                raise ValueError(
                    f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
                    f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
                    " the batch size of `prompt`."
                )
            else:
                uncond_tokens = negative_prompt

            max_length = text_input_ids.shape[-1]
            uncond_input = self.tokenizer(
                uncond_tokens,
                padding="max_length",
                max_length=max_length,
                truncation=True,
                return_tensors="pt",
            )
            uncond_embeddings = self.text_encoder(uncond_input.input_ids.to(self.device))[0]

            # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
            seq_len = uncond_embeddings.shape[1]
            uncond_embeddings = uncond_embeddings.repeat(batch_size, num_images_per_prompt, 1)
            uncond_embeddings = uncond_embeddings.view(batch_size * num_images_per_prompt, seq_len, -1)

            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            text_embeddings = torch.cat([uncond_embeddings, text_embeddings])

        # get the initial random noise unless the user supplied it
        # Unlike in other pipelines, latents need to be generated in the target device
        # for 1-to-1 results reproducibility with the CompVis implementation.
        # However this currently doesn't work in `mps`.
        latents_shape = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
        latents_shape_reference = (batch_size * num_images_per_prompt, self.unet.config.in_channels, 64, 64)
        latents_dtype = text_embeddings.dtype
        if latents is None:
            if self.device.type == "mps":
                # randn does not exist on mps
                latents_reference = torch.randn(
                    latents_shape_reference, generator=generator, device="cpu", dtype=latents_dtype
                ).to(self.device)
                latents = torch.randn(latents_shape, generator=generator, device="cpu", dtype=latents_dtype).to(
                    self.device
                )
            else:
                latents_reference = torch.randn(
                    latents_shape_reference, generator=generator, device=self.device, dtype=latents_dtype
                )
                latents = torch.randn(latents_shape, generator=generator, device=self.device, dtype=latents_dtype)
        else:
            if latents_reference.shape != latents_shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {latents_shape}")
            latents_reference = latents_reference.to(self.device)
            latents = latents.to(self.device)

        # This is the key part of the pipeline where we
        # try to ensure that the generated images w/ the same seed
        # but different sizes actually result in similar images
        dx = (latents_shape[3] - latents_shape_reference[3]) // 2
        dy = (latents_shape[2] - latents_shape_reference[2]) // 2
        w = latents_shape_reference[3] if dx >= 0 else latents_shape_reference[3] + 2 * dx
        h = latents_shape_reference[2] if dy >= 0 else latents_shape_reference[2] + 2 * dy
        tx = 0 if dx < 0 else dx
        ty = 0 if dy < 0 else dy
        dx = max(-dx, 0)
        dy = max(-dy, 0)
        latents[:, :, ty : ty + h, tx : tx + w] = latents_reference[:, :, dy : dy + h, dx : dx + w]

        # set timesteps
        self.scheduler.set_timesteps(num_inference_steps)

        # Some schedulers like PNDM have timesteps as arrays
        # It's more optimized to move all timesteps to correct device beforehand
        timesteps_tensor = self.scheduler.timesteps.to(self.device)

        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma

        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
        # and should be between [0, 1]
        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
        extra_step_kwargs = {}
        if accepts_eta:
            extra_step_kwargs["eta"] = eta

        for i, t in enumerate(self.progress_bar(timesteps_tensor)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
            latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)

            # predict the noise residual
            noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=text_embeddings).sample

            # perform guidance
            if do_classifier_free_guidance:
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)

            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample

            # call the callback, if provided
            if callback is not None and i % callback_steps == 0:
                callback(i, t, latents)

        latents = 1 / 0.1_8215 * latents
        image = self.vae.decode(latents).sample

        image = (image / 2 + 0.5).clamp(0, 1)

        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        image = image.cpu().permute(0, 2, 3, 1).float().numpy()

        if self.safety_checker is not None:
            safety_checker_input = self.feature_extractor(self.numpy_to_pil(image), return_tensors="pt").to(
                self.device
            )
            image, has_nsfw_concept = self.safety_checker(
                images=image, clip_input=safety_checker_input.pixel_values.to(text_embeddings.dtype)
            )
        else:
            has_nsfw_concept = None

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image, has_nsfw_concept)

        return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept)
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
    "configuration_bloom": ["BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP", "BloomConfig", "BloomOnnxConfig"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_bloom_fast"] = ["BloomTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_bloom"] = [
        "BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "BloomForCausalLM",
        "BloomModel",
        "BloomPreTrainedModel",
        "BloomForSequenceClassification",
        "BloomForTokenClassification",
        "BloomForQuestionAnswering",
    ]
if TYPE_CHECKING:
from .configuration_bloom import BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP, BloomConfig, BloomOnnxConfig
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bloom_fast import BloomTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bloom import (
BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST,
BloomForCausalLM,
BloomForQuestionAnswering,
BloomForSequenceClassification,
BloomForTokenClassification,
BloomModel,
BloomPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
'''simple docstring'''
from typing import List, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "huggingface/autoformer-tourism-monthly": "https://huggingface.co/huggingface/autoformer-tourism-monthly/resolve/main/config.json",
}


class AutoformerConfig(PretrainedConfig):
    model_type = "autoformer"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
        "num_hidden_layers": "encoder_layers",
    }

    def __init__(
        self,
        prediction_length: Optional[int] = None,
        context_length: Optional[int] = None,
        distribution_output: str = "student_t",
        loss: str = "nll",
        input_size: int = 1,
        lags_sequence: List[int] = [1, 2, 3, 4, 5, 6, 7],
        scaling: bool = True,
        num_dynamic_real_features: int = 0,
        num_static_categorical_features: int = 0,
        num_static_real_features: int = 0,
        num_time_features: int = 0,
        cardinality: Optional[List[int]] = None,
        embedding_dimension: Optional[List[int]] = None,
        d_model: int = 64,
        encoder_attention_heads: int = 2,
        decoder_attention_heads: int = 2,
        encoder_layers: int = 2,
        decoder_layers: int = 2,
        encoder_ffn_dim: int = 32,
        decoder_ffn_dim: int = 32,
        activation_function: str = "gelu",
        dropout: float = 0.1,
        encoder_layerdrop: float = 0.1,
        decoder_layerdrop: float = 0.1,
        attention_dropout: float = 0.1,
        activation_dropout: float = 0.1,
        num_parallel_samples: int = 100,
        init_std: float = 0.02,
        use_cache: bool = True,
        is_encoder_decoder=True,
        # Autoformer arguments
        label_length: int = 10,
        moving_average: int = 25,
        autocorrelation_factor: int = 3,
        **kwargs,
    ):
        # time series specific configuration
        self.prediction_length = prediction_length
        self.context_length = context_length if context_length is not None else prediction_length
        self.distribution_output = distribution_output
        self.loss = loss
        self.input_size = input_size
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence
        self.scaling = scaling
        self.num_dynamic_real_features = num_dynamic_real_features
        self.num_static_real_features = num_static_real_features
        self.num_static_categorical_features = num_static_categorical_features
        if cardinality is not None and num_static_categorical_features > 0:
            if len(cardinality) != num_static_categorical_features:
                raise ValueError(
                    "The cardinality should be a list of the same length as `num_static_categorical_features`"
                )
            self.cardinality = cardinality
        else:
            self.cardinality = [0]
        if embedding_dimension is not None and num_static_categorical_features > 0:
            if len(embedding_dimension) != num_static_categorical_features:
                raise ValueError(
                    "The embedding dimension should be a list of the same length as `num_static_categorical_features`"
                )
            self.embedding_dimension = embedding_dimension
        else:
            self.embedding_dimension = [min(50, (cat + 1) // 2) for cat in self.cardinality]
        self.num_parallel_samples = num_parallel_samples

        # Transformer architecture configuration
        self.feature_size = input_size * len(self.lags_sequence) + self._number_of_features
        self.d_model = d_model
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_attention_heads = decoder_attention_heads
        self.encoder_ffn_dim = encoder_ffn_dim
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.activation_function = activation_function
        self.init_std = init_std
        self.use_cache = use_cache

        # Autoformer
        self.label_length = label_length
        self.moving_average = moving_average
        self.autocorrelation_factor = autocorrelation_factor

        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def _number_of_features(self) -> int:
        return (
            sum(self.embedding_dimension)
            + self.num_dynamic_real_features
            + self.num_time_features
            + self.num_static_real_features
            + self.input_size * 2  # the log1p(abs(loc)) and log(scale) features
        )
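
# Usage sketch: a minimal monthly-forecasting config; feature_size is derived
# from the lags and the extra static/dynamic/time features.
if __name__ == "__main__":
    config = AutoformerConfig(prediction_length=24, num_time_features=1)
    print(config.feature_size)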
'''simple docstring'''
import argparse
import os
import torch
from diffusers import (
CMStochasticIterativeScheduler,
ConsistencyModelPipeline,
UNetaDModel,
)
TEST_UNET_CONFIG = {
"""sample_size""": 32,
"""in_channels""": 3,
"""out_channels""": 3,
"""layers_per_block""": 2,
"""num_class_embeds""": 1_000,
"""block_out_channels""": [32, 64],
"""attention_head_dim""": 8,
"""down_block_types""": [
"""ResnetDownsampleBlock2D""",
"""AttnDownBlock2D""",
],
"""up_block_types""": [
"""AttnUpBlock2D""",
"""ResnetUpsampleBlock2D""",
],
"""resnet_time_scale_shift""": """scale_shift""",
"""upsample_type""": """resnet""",
"""downsample_type""": """resnet""",
}
IMAGENET_64_UNET_CONFIG = {
"""sample_size""": 64,
"""in_channels""": 3,
"""out_channels""": 3,
"""layers_per_block""": 3,
"""num_class_embeds""": 1_000,
"""block_out_channels""": [192, 192 * 2, 192 * 3, 192 * 4],
"""attention_head_dim""": 64,
"""down_block_types""": [
"""ResnetDownsampleBlock2D""",
"""AttnDownBlock2D""",
"""AttnDownBlock2D""",
"""AttnDownBlock2D""",
],
"""up_block_types""": [
"""AttnUpBlock2D""",
"""AttnUpBlock2D""",
"""AttnUpBlock2D""",
"""ResnetUpsampleBlock2D""",
],
"""resnet_time_scale_shift""": """scale_shift""",
"""upsample_type""": """resnet""",
"""downsample_type""": """resnet""",
}
LSUN_256_UNET_CONFIG = {
"""sample_size""": 256,
"""in_channels""": 3,
"""out_channels""": 3,
"""layers_per_block""": 2,
"""num_class_embeds""": None,
"""block_out_channels""": [256, 256, 256 * 2, 256 * 2, 256 * 4, 256 * 4],
"""attention_head_dim""": 64,
"""down_block_types""": [
"""ResnetDownsampleBlock2D""",
"""ResnetDownsampleBlock2D""",
"""ResnetDownsampleBlock2D""",
"""AttnDownBlock2D""",
"""AttnDownBlock2D""",
"""AttnDownBlock2D""",
],
"""up_block_types""": [
"""AttnUpBlock2D""",
"""AttnUpBlock2D""",
"""AttnUpBlock2D""",
"""ResnetUpsampleBlock2D""",
"""ResnetUpsampleBlock2D""",
"""ResnetUpsampleBlock2D""",
],
"""resnet_time_scale_shift""": """default""",
"""upsample_type""": """resnet""",
"""downsample_type""": """resnet""",
}
CD_SCHEDULER_CONFIG = {
"""num_train_timesteps""": 40,
"""sigma_min""": 0.0_0_2,
"""sigma_max""": 8_0.0,
}
CT_IMAGENET_64_SCHEDULER_CONFIG = {
"""num_train_timesteps""": 201,
"""sigma_min""": 0.0_0_2,
"""sigma_max""": 8_0.0,
}
CT_LSUN_256_SCHEDULER_CONFIG = {
"""num_train_timesteps""": 151,
"""sigma_min""": 0.0_0_2,
"""sigma_max""": 8_0.0,
}
def str2bool(v):
    """Parse common yes/no strings into a bool (for argparse flags)."""
    if isinstance(v, bool):
        return v
    if v.lower() in ("yes", "true", "t", "y", "1"):
        return True
    elif v.lower() in ("no", "false", "f", "n", "0"):
        return False
    else:
        raise argparse.ArgumentTypeError("boolean value expected")
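

# Usage sketch: wiring str2bool into argparse so a flag accepts yes/no style
# values (the flag name here is illustrative, not part of the original script):
#   parser.add_argument("--class_cond", type=str2bool, default=True)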
def convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=False):
    new_checkpoint[f"{new_prefix}.norm1.weight"] = checkpoint[f"{old_prefix}.in_layers.0.weight"]
    new_checkpoint[f"{new_prefix}.norm1.bias"] = checkpoint[f"{old_prefix}.in_layers.0.bias"]
    new_checkpoint[f"{new_prefix}.conv1.weight"] = checkpoint[f"{old_prefix}.in_layers.2.weight"]
    new_checkpoint[f"{new_prefix}.conv1.bias"] = checkpoint[f"{old_prefix}.in_layers.2.bias"]
    new_checkpoint[f"{new_prefix}.time_emb_proj.weight"] = checkpoint[f"{old_prefix}.emb_layers.1.weight"]
    new_checkpoint[f"{new_prefix}.time_emb_proj.bias"] = checkpoint[f"{old_prefix}.emb_layers.1.bias"]
    new_checkpoint[f"{new_prefix}.norm2.weight"] = checkpoint[f"{old_prefix}.out_layers.0.weight"]
    new_checkpoint[f"{new_prefix}.norm2.bias"] = checkpoint[f"{old_prefix}.out_layers.0.bias"]
    new_checkpoint[f"{new_prefix}.conv2.weight"] = checkpoint[f"{old_prefix}.out_layers.3.weight"]
    new_checkpoint[f"{new_prefix}.conv2.bias"] = checkpoint[f"{old_prefix}.out_layers.3.bias"]

    if has_skip:
        new_checkpoint[f"{new_prefix}.conv_shortcut.weight"] = checkpoint[f"{old_prefix}.skip_connection.weight"]
        new_checkpoint[f"{new_prefix}.conv_shortcut.bias"] = checkpoint[f"{old_prefix}.skip_connection.bias"]

    return new_checkpoint
def convert_attention(checkpoint, new_checkpoint, old_prefix, new_prefix, attention_head_dim=None):
    # the original checkpoint stores q/k/v as a single fused conv; split it
    weight_q, weight_k, weight_v = checkpoint[f"{old_prefix}.qkv.weight"].chunk(3, dim=0)
    bias_q, bias_k, bias_v = checkpoint[f"{old_prefix}.qkv.bias"].chunk(3, dim=0)

    new_checkpoint[f"{new_prefix}.group_norm.weight"] = checkpoint[f"{old_prefix}.norm.weight"]
    new_checkpoint[f"{new_prefix}.group_norm.bias"] = checkpoint[f"{old_prefix}.norm.bias"]

    new_checkpoint[f"{new_prefix}.to_q.weight"] = weight_q.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_q.bias"] = bias_q.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_k.weight"] = weight_k.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_k.bias"] = bias_k.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_v.weight"] = weight_v.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_v.bias"] = bias_v.squeeze(-1).squeeze(-1)

    new_checkpoint[f"{new_prefix}.to_out.0.weight"] = (
        checkpoint[f"{old_prefix}.proj_out.weight"].squeeze(-1).squeeze(-1)
    )
    new_checkpoint[f"{new_prefix}.to_out.0.bias"] = checkpoint[f"{old_prefix}.proj_out.bias"].squeeze(-1).squeeze(-1)

    return new_checkpoint
def con_pt_to_diffuser(checkpoint_path: str, unet_config):
    checkpoint = torch.load(checkpoint_path, map_location="cpu")
    new_checkpoint = {}
    new_checkpoint["time_embedding.linear_1.weight"] = checkpoint["time_embed.0.weight"]
    new_checkpoint["time_embedding.linear_1.bias"] = checkpoint["time_embed.0.bias"]
    new_checkpoint["time_embedding.linear_2.weight"] = checkpoint["time_embed.2.weight"]
    new_checkpoint["time_embedding.linear_2.bias"] = checkpoint["time_embed.2.bias"]
    if unet_config["num_class_embeds"] is not None:
        new_checkpoint["class_embedding.weight"] = checkpoint["label_emb.weight"]
    new_checkpoint["conv_in.weight"] = checkpoint["input_blocks.0.0.weight"]
    new_checkpoint["conv_in.bias"] = checkpoint["input_blocks.0.0.bias"]
    down_block_types = unet_config["down_block_types"]
    layers_per_block = unet_config["layers_per_block"]
    attention_head_dim = unet_config["attention_head_dim"]
    channels_list = unet_config["block_out_channels"]
    current_layer = 1
    prev_channels = channels_list[0]
    for i, layer_type in enumerate(down_block_types):
        current_channels = channels_list[i]
        downsample_block_has_skip = current_channels != prev_channels
        if layer_type == "ResnetDownsampleBlock2D":
            for j in range(layers_per_block):
                new_prefix = f"down_blocks.{i}.resnets.{j}"
                old_prefix = f"input_blocks.{current_layer}.0"
                has_skip = True if j == 0 and downsample_block_has_skip else False
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=has_skip)
                current_layer += 1
        elif layer_type == "AttnDownBlock2D":
            for j in range(layers_per_block):
                new_prefix = f"down_blocks.{i}.resnets.{j}"
                old_prefix = f"input_blocks.{current_layer}.0"
                has_skip = True if j == 0 and downsample_block_has_skip else False
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=has_skip)
                new_prefix = f"down_blocks.{i}.attentions.{j}"
                old_prefix = f"input_blocks.{current_layer}.1"
                new_checkpoint = convert_attention(
                    checkpoint, new_checkpoint, old_prefix, new_prefix, attention_head_dim
                )
                current_layer += 1
        if i != len(down_block_types) - 1:
            new_prefix = f"down_blocks.{i}.downsamplers.0"
            old_prefix = f"input_blocks.{current_layer}.0"
            new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)
            current_layer += 1
        prev_channels = current_channels
    # hardcoded the mid-block for now
    new_prefix = "mid_block.resnets.0"
    old_prefix = "middle_block.0"
    new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)
    new_prefix = "mid_block.attentions.0"
    old_prefix = "middle_block.1"
    new_checkpoint = convert_attention(checkpoint, new_checkpoint, old_prefix, new_prefix, attention_head_dim)
    new_prefix = "mid_block.resnets.1"
    old_prefix = "middle_block.2"
    new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)
    current_layer = 0
    up_block_types = unet_config["up_block_types"]
    for i, layer_type in enumerate(up_block_types):
        if layer_type == "ResnetUpsampleBlock2D":
            for j in range(layers_per_block + 1):
                new_prefix = f"up_blocks.{i}.resnets.{j}"
                old_prefix = f"output_blocks.{current_layer}.0"
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=True)
                current_layer += 1
            if i != len(up_block_types) - 1:
                new_prefix = f"up_blocks.{i}.upsamplers.0"
                old_prefix = f"output_blocks.{current_layer-1}.1"
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)
        elif layer_type == "AttnUpBlock2D":
            for j in range(layers_per_block + 1):
                new_prefix = f"up_blocks.{i}.resnets.{j}"
                old_prefix = f"output_blocks.{current_layer}.0"
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=True)
                new_prefix = f"up_blocks.{i}.attentions.{j}"
                old_prefix = f"output_blocks.{current_layer}.1"
                new_checkpoint = convert_attention(
                    checkpoint, new_checkpoint, old_prefix, new_prefix, attention_head_dim
                )
                current_layer += 1
            if i != len(up_block_types) - 1:
                new_prefix = f"up_blocks.{i}.upsamplers.0"
                old_prefix = f"output_blocks.{current_layer-1}.2"
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)
    new_checkpoint["conv_norm_out.weight"] = checkpoint["out.0.weight"]
    new_checkpoint["conv_norm_out.bias"] = checkpoint["out.0.bias"]
    new_checkpoint["conv_out.weight"] = checkpoint["out.2.weight"]
    new_checkpoint["conv_out.bias"] = checkpoint["out.2.bias"]
    return new_checkpoint
if __name__ == "__main__":
lowercase_ = argparse.ArgumentParser()
parser.add_argument("""--unet_path""", default=None, type=str, required=True, help="""Path to the unet.pt to convert.""")
parser.add_argument(
"""--dump_path""", default=None, type=str, required=True, help="""Path to output the converted UNet model."""
)
parser.add_argument("""--class_cond""", default=True, type=str, help="""Whether the model is class-conditional.""")
lowercase_ = parser.parse_args()
lowercase_ = strabool(args.class_cond)
lowercase_ = os.path.basename(args.unet_path)
print(f"""Checkpoint: {ckpt_name}""")
# Get U-Net config
if "imagenet64" in ckpt_name:
lowercase_ = IMAGENET_64_UNET_CONFIG
elif "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)):
lowercase_ = LSUN_256_UNET_CONFIG
elif "test" in ckpt_name:
lowercase_ = TEST_UNET_CONFIG
else:
raise ValueError(f"""Checkpoint type {ckpt_name} is not currently supported.""")
if not args.class_cond:
lowercase_ = None
lowercase_ = con_pt_to_diffuser(args.unet_path, unet_config)
lowercase_ = UNetaDModel(**unet_config)
image_unet.load_state_dict(converted_unet_ckpt)
# Get scheduler config
if "cd" in ckpt_name or "test" in ckpt_name:
lowercase_ = CD_SCHEDULER_CONFIG
elif "ct" in ckpt_name and "imagenet64" in ckpt_name:
lowercase_ = CT_IMAGENET_64_SCHEDULER_CONFIG
elif "ct" in ckpt_name and "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)):
lowercase_ = CT_LSUN_256_SCHEDULER_CONFIG
else:
raise ValueError(f"""Checkpoint type {ckpt_name} is not currently supported.""")
lowercase_ = CMStochasticIterativeScheduler(**scheduler_config)
lowercase_ = ConsistencyModelPipeline(unet=image_unet, scheduler=cm_scheduler)
consistency_model.save_pretrained(args.dump_path)
| 58
|
"""simple docstring"""
import inspect
import unittest
from transformers import ViTMSNConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTMSNForImageClassification, ViTMSNModel
from transformers.models.vit_msn.modeling_vit_msn import VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class ViTMSNModelTester:
    def __init__(self, parent, batch_size=13, image_size=30, patch_size=2, num_channels=3, is_training=True, use_labels=True, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, type_sequence_label_size=10, initializer_range=0.02, scope=None):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        # in ViT MSN, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return ViTMSNConfig(
            image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, initializer_range=self.initializer_range,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = ViTMSNModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = ViTMSNForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        print(f"Pixel and labels shape: {pixel_values.shape}, {labels.shape}")
        print(f"Labels: {labels}")
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))
        # test greyscale images
        config.num_channels = 1
        model = ViTMSNForImageClassification(config)
        model.to(torch_device)
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class ViTMSNModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (ViTMSNModel, ViTMSNForImageClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": ViTMSNModel, "image-classification": ViTMSNForImageClassification}
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False
    def setUp(self):
        self.model_tester = ViTMSNModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTMSNConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="ViTMSN does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ViTMSNModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class ViTMSNModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return ViTImageProcessor.from_pretrained("facebook/vit-msn-small") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        torch.manual_seed(2)
        model = ViTMSNForImageClassification.from_pretrained("facebook/vit-msn-small").to(torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        # verify the logits
        expected_shape = torch.Size((1, 1_000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([-0.0803, -0.4454, -0.2375]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
| 291
| 0
|
def solution(n: int = 100) -> int:
    """Count the distinct terms a**b for 2 <= a <= n and 2 <= b <= n."""
    collect_powers = set()
    n = n + 1  # maximum limit
    for a in range(2, n):
        for b in range(2, n):
            current_pow = a**b  # calculates the current power
            collect_powers.add(current_pow)  # adds the result to the set
    return len(collect_powers)


if __name__ == "__main__":
    print("Number of terms ", solution(int(str(input()).strip())))
| 273
|
def longest_common_subsequence(x: str, y: str):
    """Return the length of the LCS of x and y together with one such subsequence."""
    assert x is not None
    assert y is not None
    m = len(x)
    n = len(y)
    # declaring the array for storing the dp values
    l = [[0] * (n + 1) for _ in range(m + 1)]  # noqa: E741
    for i in range(1, m + 1):
        for j in range(1, n + 1):
            match = 1 if x[i - 1] == y[j - 1] else 0
            l[i][j] = max(l[i - 1][j], l[i][j - 1], l[i - 1][j - 1] + match)
    seq = ""
    i, j = m, n
    while i > 0 and j > 0:
        match = 1 if x[i - 1] == y[j - 1] else 0
        if l[i][j] == l[i - 1][j - 1] + match:
            if match == 1:
                seq = x[i - 1] + seq
            i -= 1
            j -= 1
        elif l[i][j] == l[i - 1][j]:
            i -= 1
        else:
            j -= 1
    return l[m][n], seq


if __name__ == "__main__":
    a = "AGGTAB"
    b = "GXTXAYB"
    expected_ln = 4
    expected_subseq = "GTAB"
    ln, subseq = longest_common_subsequence(a, b)
    print("len =", ln, ", sub-sequence =", subseq)
    import doctest

    doctest.testmod()
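# Worked example (illustrative, not from the original file): for x = "ABC" and
# y = "AC" the DP table yields length 2 and the backtracking step rebuilds "AC".
assert longest_common_subsequence("ABC", "AC") == (2, "AC")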
| 273
| 1
|
"""simple docstring"""
class SCREAMING_SNAKE_CASE__ :
"""simple docstring"""
def __init__( self , snake_case__ , snake_case__ , snake_case__ ):
"""simple docstring"""
lowerCAmelCase : Optional[Any] = name
lowerCAmelCase : str = value
lowerCAmelCase : Any = weight
def __repr__( self ):
"""simple docstring"""
return f"""{self.__class__.__name__}({self.name}, {self.value}, {self.weight})"""
def lowercase__ ( self ):
"""simple docstring"""
return self.value
def lowercase__ ( self ):
"""simple docstring"""
return self.name
def lowercase__ ( self ):
"""simple docstring"""
return self.weight
def lowercase__ ( self ):
"""simple docstring"""
return self.value / self.weight
def a__ ( SCREAMING_SNAKE_CASE : List[str] , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : Any ):
'''simple docstring'''
lowerCAmelCase : int = []
for i in range(len(SCREAMING_SNAKE_CASE ) ):
menu.append(Things(name[i] , value[i] , weight[i] ) )
return menu
def a__ ( SCREAMING_SNAKE_CASE : Dict , SCREAMING_SNAKE_CASE : Union[str, Any] , SCREAMING_SNAKE_CASE : int ):
'''simple docstring'''
lowerCAmelCase : Tuple = sorted(SCREAMING_SNAKE_CASE , key=SCREAMING_SNAKE_CASE , reverse=SCREAMING_SNAKE_CASE )
lowerCAmelCase : Optional[Any] = []
lowerCAmelCase , lowerCAmelCase : List[Any] = 0.0, 0.0
for i in range(len(SCREAMING_SNAKE_CASE ) ):
if (total_cost + items_copy[i].get_weight()) <= max_cost:
result.append(items_copy[i] )
total_cost += items_copy[i].get_weight()
total_value += items_copy[i].get_value()
return (result, total_value)
def a__ ( ):
'''simple docstring'''
if __name__ == "__main__":
import doctest
doctest.testmod()
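# Small demonstration (illustrative; the menu below is made up, not from the
# original file). With a budget of 60 and the value/weight ratio as the greedy
# key, only the best-ratio item fits.
demo_menu = build_menu(["Burger", "Pizza", "Coca Cola"], [80, 100, 60], [40, 60, 40])
# ratios: Burger 2.0, Pizza ~1.67, Coca Cola 1.5 -> Burger is taken first (cost 40);
# each remaining item would push the cost past 60, so both are skipped.
demo_taken, demo_value = greedy(demo_menu, 60.0, Things.value_weight)
assert [t.get_name() for t in demo_taken] == ["Burger"] and demo_value == 80.0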
| 108
|
"""simple docstring"""
def a__ ( SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : List[Any] , SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : List[Any] ):
'''simple docstring'''
if height >= 1:
move_tower(height - 1 , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
move_disk(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
move_tower(height - 1 , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
def a__ ( SCREAMING_SNAKE_CASE : Tuple , SCREAMING_SNAKE_CASE : Optional[int] ):
'''simple docstring'''
print("moving disk from" , SCREAMING_SNAKE_CASE , "to" , SCREAMING_SNAKE_CASE )
def a__ ( ):
'''simple docstring'''
lowerCAmelCase : Optional[int] = int(input("Height of hanoi: " ).strip() )
move_tower(SCREAMING_SNAKE_CASE , "A" , "B" , "C" )
if __name__ == "__main__":
main()
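# Worked example (illustrative, not from the original file): the recursion makes
# exactly 2**height - 1 moves. move_tower(2, "A", "B", "C") prints three moves:
#   moving disk from A to C
#   moving disk from A to B
#   moving disk from C to B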
| 108
| 1
|
import re


def is_sri_lankan_phone_number(phone: str) -> bool:
    """Return True if `phone` is a valid Sri Lankan mobile phone number."""
    pattern = re.compile(
        r"^(?:0|94|\+94|0{2}94)" r"7(0|1|2|4|5|6|7|8)" r"(-| |)" r"\d{7}$")
    return bool(re.search(pattern, phone))


if __name__ == "__main__":
    phone = "0094702343221"
    print(is_sri_lankan_phone_number(phone))
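# Examples (illustrative, not from the original file): accepted prefixes are
# 0 / 94 / +94 / 0094, followed by 7x (x not 3 or 9) and seven more digits.
assert is_sri_lankan_phone_number("+94773283048") is True
assert is_sri_lankan_phone_number("0912345678") is False  # second digit is not 7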
| 366
|
import re
from typing import Callable, List, Optional, Union
import tensorflow as tf
try:
from tensorflow.keras.optimizers.legacy import Adam
except ImportError:
from tensorflow.keras.optimizers import Adam
class WarmUp(tf.keras.optimizers.schedules.LearningRateSchedule):
    def __init__(self, initial_learning_rate, decay_schedule_fn, warmup_steps, power=1.0, name=None):
        super().__init__()
        self.initial_learning_rate = initial_learning_rate
        self.warmup_steps = warmup_steps
        self.power = power
        self.decay_schedule_fn = decay_schedule_fn
        self.name = name

    def __call__(self, step):
        with tf.name_scope(self.name or "WarmUp") as name:
            # Implements polynomial warmup. i.e., if global_step < warmup_steps, the
            # learning rate will be `global_step/num_warmup_steps * init_lr`.
            global_step_float = tf.cast(step, tf.float32)
            warmup_steps_float = tf.cast(self.warmup_steps, tf.float32)
            warmup_percent_done = global_step_float / warmup_steps_float
            warmup_learning_rate = self.initial_learning_rate * tf.math.pow(warmup_percent_done, self.power)
            return tf.cond(
                global_step_float < warmup_steps_float, lambda: warmup_learning_rate, lambda: self.decay_schedule_fn(step - self.warmup_steps), name=name,
            )

    def get_config(self):
        return {
            "initial_learning_rate": self.initial_learning_rate,
            "decay_schedule_fn": self.decay_schedule_fn,
            "warmup_steps": self.warmup_steps,
            "power": self.power,
            "name": self.name,
        }
def create_optimizer(init_lr, num_train_steps, num_warmup_steps, min_lr_ratio=0.0, adam_beta1=0.9, adam_beta2=0.999, adam_epsilon=1e-8, adam_clipnorm=None, adam_global_clipnorm=None, weight_decay_rate=0.0, power=1.0, include_in_weight_decay=None):
    """Creates an optimizer with a learning rate schedule: polynomial decay, optionally preceded by warmup."""
    lr_schedule = tf.keras.optimizers.schedules.PolynomialDecay(
        initial_learning_rate=init_lr, decay_steps=num_train_steps - num_warmup_steps, end_learning_rate=init_lr * min_lr_ratio, power=power,
    )
    if num_warmup_steps:
        lr_schedule = WarmUp(
            initial_learning_rate=init_lr, decay_schedule_fn=lr_schedule, warmup_steps=num_warmup_steps,
        )
    if weight_decay_rate > 0.0:
        optimizer = AdamWeightDecay(
            learning_rate=lr_schedule, weight_decay_rate=weight_decay_rate, beta_1=adam_beta1, beta_2=adam_beta2, epsilon=adam_epsilon, clipnorm=adam_clipnorm, global_clipnorm=adam_global_clipnorm, exclude_from_weight_decay=["LayerNorm", "layer_norm", "bias"], include_in_weight_decay=include_in_weight_decay,
        )
    else:
        optimizer = tf.keras.optimizers.Adam(
            learning_rate=lr_schedule, beta_1=adam_beta1, beta_2=adam_beta2, epsilon=adam_epsilon, clipnorm=adam_clipnorm, global_clipnorm=adam_global_clipnorm,
        )
    # We return the optimizer and the LR scheduler in order to better track the
    # evolution of the LR independently of the optimizer.
    return optimizer, lr_schedule
class AdamWeightDecay(Adam):
    def __init__(self, learning_rate=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-7, amsgrad=False, weight_decay_rate=0.0, include_in_weight_decay=None, exclude_from_weight_decay=None, name="AdamWeightDecay", **kwargs):
        super().__init__(learning_rate, beta_1, beta_2, epsilon, amsgrad, name, **kwargs)
        self.weight_decay_rate = weight_decay_rate
        self._include_in_weight_decay = include_in_weight_decay
        self._exclude_from_weight_decay = exclude_from_weight_decay

    @classmethod
    def from_config(cls, config):
        custom_objects = {"WarmUp": WarmUp}
        return super(AdamWeightDecay, cls).from_config(config, custom_objects=custom_objects)

    def _prepare_local(self, var_device, var_dtype, apply_state):
        super(AdamWeightDecay, self)._prepare_local(var_device, var_dtype, apply_state)
        apply_state[(var_device, var_dtype)]["weight_decay_rate"] = tf.constant(
            self.weight_decay_rate, name="adam_weight_decay_rate"
        )

    def _decay_weights_op(self, var, learning_rate, apply_state):
        do_decay = self._do_use_weight_decay(var.name)
        if do_decay:
            return var.assign_sub(
                learning_rate * var * apply_state[(var.device, var.dtype.base_dtype)]["weight_decay_rate"], use_locking=self._use_locking,
            )
        return tf.no_op()

    def apply_gradients(self, grads_and_vars, name=None, **kwargs):
        grads, tvars = list(zip(*grads_and_vars))
        return super(AdamWeightDecay, self).apply_gradients(zip(grads, tvars), name=name, **kwargs)

    def _get_lr(self, var_device, var_dtype, apply_state):
        if apply_state is None:
            return self._decayed_lr_t[var_dtype], {}
        apply_state = apply_state or {}
        coefficients = apply_state.get((var_device, var_dtype))
        if coefficients is None:
            coefficients = self._fallback_apply_state(var_device, var_dtype)
            apply_state[(var_device, var_dtype)] = coefficients
        return coefficients["lr_t"], {"apply_state": apply_state}

    def _resource_apply_dense(self, grad, var, apply_state=None):
        lr_t, kwargs = self._get_lr(var.device, var.dtype.base_dtype, apply_state)
        decay = self._decay_weights_op(var, lr_t, apply_state)
        with tf.control_dependencies([decay]):
            return super(AdamWeightDecay, self)._resource_apply_dense(grad, var, **kwargs)

    def _resource_apply_sparse(self, grad, var, indices, apply_state=None):
        lr_t, kwargs = self._get_lr(var.device, var.dtype.base_dtype, apply_state)
        decay = self._decay_weights_op(var, lr_t, apply_state)
        with tf.control_dependencies([decay]):
            return super(AdamWeightDecay, self)._resource_apply_sparse(grad, var, indices, **kwargs)

    def get_config(self):
        config = super().get_config()
        config.update({"weight_decay_rate": self.weight_decay_rate})
        return config

    def _do_use_weight_decay(self, param_name):
        if self.weight_decay_rate == 0:
            return False
        if self._include_in_weight_decay:
            for r in self._include_in_weight_decay:
                if re.search(r, param_name) is not None:
                    return True
        if self._exclude_from_weight_decay:
            for r in self._exclude_from_weight_decay:
                if re.search(r, param_name) is not None:
                    return False
        return True
class GradientAccumulator:
    def __init__(self):
        self._gradients = []
        self._accum_steps = None

    @property
    def step(self):
        if self._accum_steps is None:
            self._accum_steps = tf.Variable(
                tf.constant(0, dtype=tf.int64), trainable=False, synchronization=tf.VariableSynchronization.ON_READ, aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA,
            )
        return self._accum_steps.value()

    @property
    def gradients(self):
        if not self._gradients:
            raise ValueError("The accumulator should be called first to initialize the gradients")
        return [gradient.value() if gradient is not None else gradient for gradient in self._gradients]

    def __call__(self, gradients):
        if not self._gradients:
            _ = self.step  # Create the step variable.
            self._gradients.extend(
                [
                    tf.Variable(
                        tf.zeros_like(gradient), trainable=False, synchronization=tf.VariableSynchronization.ON_READ, aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA,
                    )
                    if gradient is not None
                    else gradient
                    for gradient in gradients
                ]
            )
        if len(gradients) != len(self._gradients):
            raise ValueError(f"Expected {len(self._gradients)} gradients, but got {len(gradients)}")
        for accum_gradient, gradient in zip(self._gradients, gradients):
            if accum_gradient is not None and gradient is not None:
                accum_gradient.assign_add(gradient)
        self._accum_steps.assign_add(1)

    def reset(self):
        if not self._gradients:
            return
        self._accum_steps.assign(0)
        for gradient in self._gradients:
            if gradient is not None:
                gradient.assign(tf.zeros_like(gradient))
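# Minimal usage sketch (illustrative; the step counts and rates below are made up):
# `create_optimizer` wires PolynomialDecay, the WarmUp wrapper and AdamWeightDecay
# together and returns both pieces so the LR schedule can be tracked separately.
# optimizer, lr_schedule = create_optimizer(
#     init_lr=5e-5, num_train_steps=10_000, num_warmup_steps=1_000, weight_decay_rate=0.01
# )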
| 173
| 0
|
def sum_of_series(first_term, common_diff, num_of_terms):
    """Sum of an arithmetic progression: n/2 * (2a + (n - 1)d)."""
    total = (num_of_terms / 2) * (2 * first_term + (num_of_terms - 1) * common_diff)
    # formula for sum of series
    return total


def main():
    print(sum_of_series(1, 1, 10))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
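# Worked check (illustrative, not from the original file): with a = 1, d = 1 and
# n = 10 the formula gives 10 / 2 * (2 + 9) = 55, the tenth triangular number.
assert sum_of_series(1, 1, 10) == 55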
| 206
|
from __future__ import annotations

import math


def prime_sieve(num: int) -> list[int]:
    """Return all primes up to and including `num` using the sieve of Eratosthenes."""
    if num <= 0:
        msg = f"{num}: Invalid input, please enter a positive integer."
        raise ValueError(msg)
    sieve = [True] * (num + 1)
    prime = []
    start = 2
    end = int(math.sqrt(num))
    while start <= end:
        # If start is a prime
        if sieve[start] is True:
            prime.append(start)
            # Set multiples of start be False
            for i in range(start * start, num + 1, start):
                if sieve[i] is True:
                    sieve[i] = False
        start += 1
    for j in range(end + 1, num + 1):
        if sieve[j] is True:
            prime.append(j)
    return prime


if __name__ == "__main__":
    print(prime_sieve(int(input("Enter a positive integer: ").strip())))
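# Sanity check (illustrative, not from the original file): primes up to 30.
assert prime_sieve(30) == [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]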
| 206
| 1
|
"""simple docstring"""
from collections import namedtuple
UpperCAmelCase = namedtuple("""from_to""", """from_ to""")
UpperCAmelCase = {
"""cubicmeter""": from_to(1, 1),
"""litre""": from_to(0.0_01, 1_000),
"""kilolitre""": from_to(1, 1),
"""gallon""": from_to(0.0_04_54, 264.172),
"""cubicyard""": from_to(0.7_64_55, 1.3_07_95),
"""cubicfoot""": from_to(0.0_28, 35.31_47),
"""cup""": from_to(0.0_00_23_65_88, 4_226.75),
}
def lowercase ( a__ : float , a__ : str , a__ : str ) -> float:
if from_type not in METRIC_CONVERSION:
raise ValueError(
F'''Invalid \'from_type\' value: {from_type!r} Supported values are:\n'''
+ ''', '''.join(a__ ) )
if to_type not in METRIC_CONVERSION:
raise ValueError(
F'''Invalid \'to_type\' value: {to_type!r}. Supported values are:\n'''
+ ''', '''.join(a__ ) )
return value * METRIC_CONVERSION[from_type].from_ * METRIC_CONVERSION[to_type].to
if __name__ == "__main__":
import doctest
doctest.testmod()
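# Worked example (illustrative, not from the original file): conversions pivot
# through cubic metres, so 4 m^3 -> litres is 4 * 1 * 1000 = 4000.
assert volume_conversion(4, "cubicmeter", "litre") == 4000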
| 358
|
"""simple docstring"""
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)

DETA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""ut/deta""": """https://huggingface.co/ut/deta/resolve/main/config.json""",
}
class DetaConfig(PretrainedConfig):
    model_type = "deta"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }

    def __init__(self, backbone_config=None, num_queries=900, max_position_embeddings=2048, encoder_layers=6, encoder_ffn_dim=2048, encoder_attention_heads=8, decoder_layers=6, decoder_ffn_dim=1024, decoder_attention_heads=8, encoder_layerdrop=0.0, is_encoder_decoder=True, activation_function="relu", d_model=256, dropout=0.1, attention_dropout=0.0, activation_dropout=0.0, init_std=0.02, init_xavier_std=1.0, return_intermediate=True, auxiliary_loss=False, position_embedding_type="sine", num_feature_levels=5, encoder_n_points=4, decoder_n_points=4, two_stage=True, two_stage_num_proposals=300, with_box_refine=True, assign_first_stage=True, class_cost=1, bbox_cost=5, giou_cost=2, mask_loss_coefficient=1, dice_loss_coefficient=1, bbox_loss_coefficient=5, giou_loss_coefficient=2, eos_coefficient=0.1, focal_alpha=0.25, **kwargs):
        if backbone_config is None:
            logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
            backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage2", "stage3", "stage4"])
        else:
            if isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.pop("model_type")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)
        self.backbone_config = backbone_config
        self.num_queries = num_queries
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        # deformable attributes
        self.num_feature_levels = num_feature_levels
        self.encoder_n_points = encoder_n_points
        self.decoder_n_points = decoder_n_points
        self.two_stage = two_stage
        self.two_stage_num_proposals = two_stage_num_proposals
        self.with_box_refine = with_box_refine
        self.assign_first_stage = assign_first_stage
        if two_stage is True and with_box_refine is False:
            raise ValueError("If two_stage is True, with_box_refine must be True.")
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        self.focal_alpha = focal_alpha
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
| 54
| 0
|
from sklearn.metrics import f1_score
import datasets
_DESCRIPTION = '''
The F1 score is the harmonic mean of the precision and recall. It can be computed with the equation:
F1 = 2 * (precision * recall) / (precision + recall)
'''
_KWARGS_DESCRIPTION = '''
Args:
predictions (`list` of `int`): Predicted labels.
references (`list` of `int`): Ground truth labels.
labels (`list` of `int`): The set of labels to include when `average` is not set to `\'binary\'`, and the order of the labels if `average` is `None`. Labels present in the data can be excluded, for example to calculate a multiclass average ignoring a majority negative class. Labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in `predictions` and `references` are used in sorted order. Defaults to None.
pos_label (`int`): The class to be considered the positive class, in the case where `average` is set to `binary`. Defaults to 1.
average (`string`): This parameter is required for multiclass/multilabel targets. If set to `None`, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `\'binary\'`.
- \'binary\': Only report results for the class specified by `pos_label`. This is applicable only if the classes found in `predictions` and `references` are binary.
- \'micro\': Calculate metrics globally by counting the total true positives, false negatives and false positives.
- \'macro\': Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.
- \'weighted\': Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `\'macro\'` to account for label imbalance. This option can result in an F-score that is not between precision and recall.
- \'samples\': Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).
sample_weight (`list` of `float`): Sample weights Defaults to None.
Returns:
f1 (`float` or `array` of `float`): F1 score or list of f1 scores, depending on the value passed to `average`. Minimum possible value is 0. Maximum possible value is 1. Higher f1 scores are better.
Examples:
Example 1-A simple binary example
>>> f1_metric = datasets.load_metric("f1")
>>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0])
>>> print(results)
{\'f1\': 0.5}
Example 2-The same simple binary example as in Example 1, but with `pos_label` set to `0`.
>>> f1_metric = datasets.load_metric("f1")
>>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], pos_label=0)
>>> print(round(results[\'f1\'], 2))
0.67
Example 3-The same simple binary example as in Example 1, but with `sample_weight` included.
>>> f1_metric = datasets.load_metric("f1")
>>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], sample_weight=[0.9, 0.5, 3.9, 1.2, 0.3])
>>> print(round(results[\'f1\'], 2))
0.35
Example 4-A multiclass example, with different values for the `average` input.
>>> predictions = [0, 2, 1, 0, 0, 1]
>>> references = [0, 1, 2, 0, 1, 2]
>>> results = f1_metric.compute(predictions=predictions, references=references, average="macro")
>>> print(round(results[\'f1\'], 2))
0.27
>>> results = f1_metric.compute(predictions=predictions, references=references, average="micro")
>>> print(round(results[\'f1\'], 2))
0.33
>>> results = f1_metric.compute(predictions=predictions, references=references, average="weighted")
>>> print(round(results[\'f1\'], 2))
0.27
>>> results = f1_metric.compute(predictions=predictions, references=references, average=None)
>>> print(results)
{\'f1\': array([0.8, 0. , 0. ])}
'''
_CITATION = '''
@article{scikit-learn,
title={Scikit-learn: Machine Learning in {P}ython},
author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.
and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.
and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and
Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},
journal={Journal of Machine Learning Research},
volume={12},
pages={2825--2830},
year={2011}
}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class F1(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Sequence(datasets.Value("""int32""" ) ),
"""references""": datasets.Sequence(datasets.Value("""int32""" ) ),
}
if self.config_name == """multilabel"""
else {
"""predictions""": datasets.Value("""int32""" ),
"""references""": datasets.Value("""int32""" ),
} ) , reference_urls=["""https://scikit-learn.org/stable/modules/generated/sklearn.metrics.f1_score.html"""] , )
    def _compute(self, predictions, references, labels=None, pos_label=1, average="binary", sample_weight=None):
        score = f1_score(
            references, predictions, labels=labels, pos_label=pos_label, average=average, sample_weight=sample_weight
        )
        return {"f1": float(score) if score.size == 1 else score}
| 274
|
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
if TYPE_CHECKING:
from ...processing_utils import ProcessorMixin
from ...utils import TensorType
logger = logging.get_logger(__name__)

LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''microsoft/layoutlmv3-base''': '''https://huggingface.co/microsoft/layoutlmv3-base/resolve/main/config.json''',
}
class LayoutLMv3Config(PretrainedConfig):
    model_type = "layoutlmv3"

    def __init__(self, vocab_size=50265, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-5, pad_token_id=1, bos_token_id=0, eos_token_id=2, max_2d_position_embeddings=1024, coordinate_size=128, shape_size=128, has_relative_attention_bias=True, rel_pos_bins=32, max_rel_pos=128, rel_2d_pos_bins=64, max_rel_2d_pos=256, has_spatial_attention_bias=True, text_embed=True, visual_embed=True, input_size=224, num_channels=3, patch_size=16, classifier_dropout=None, **kwargs):
        super().__init__(
            vocab_size=vocab_size, hidden_size=hidden_size, num_hidden_layers=num_hidden_layers, num_attention_heads=num_attention_heads, intermediate_size=intermediate_size, hidden_act=hidden_act, hidden_dropout_prob=hidden_dropout_prob, attention_probs_dropout_prob=attention_probs_dropout_prob, max_position_embeddings=max_position_embeddings, type_vocab_size=type_vocab_size, initializer_range=initializer_range, layer_norm_eps=layer_norm_eps, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs,
        )
        self.max_2d_position_embeddings = max_2d_position_embeddings
        self.coordinate_size = coordinate_size
        self.shape_size = shape_size
        self.has_relative_attention_bias = has_relative_attention_bias
        self.rel_pos_bins = rel_pos_bins
        self.max_rel_pos = max_rel_pos
        self.has_spatial_attention_bias = has_spatial_attention_bias
        self.rel_2d_pos_bins = rel_2d_pos_bins
        self.max_rel_2d_pos = max_rel_2d_pos
        self.text_embed = text_embed
        self.visual_embed = visual_embed
        self.input_size = input_size
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.classifier_dropout = classifier_dropout
class LayoutLMv3OnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.12")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        # The order of inputs differs between the question-answering/sequence-classification
        # heads and the other tasks.
        if self.task in ["question-answering", "sequence-classification"]:
            return OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "sequence"}),
                    ("attention_mask", {0: "batch", 1: "sequence"}),
                    ("bbox", {0: "batch", 1: "sequence"}),
                    ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
                ]
            )
        else:
            return OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "sequence"}),
                    ("bbox", {0: "batch", 1: "sequence"}),
                    ("attention_mask", {0: "batch", 1: "sequence"}),
                    ("pixel_values", {0: "batch", 1: "num_channels"}),
                ]
            )

    @property
    def atol_for_validation(self) -> float:
        return 1e-5

    @property
    def default_onnx_opset(self) -> int:
        return 12

    def generate_dummy_inputs(self, processor: "ProcessorMixin", batch_size: int = -1, seq_length: int = -1, is_pair: bool = False, framework: Optional["TensorType"] = None, num_channels: int = 3, image_width: int = 40, image_height: int = 40) -> Mapping[str, Any]:
        setattr(processor.image_processor, "apply_ocr", False)
        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        batch_size = compute_effective_axis_dimension(
            batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0
        )
        # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
        token_to_add = processor.tokenizer.num_special_tokens_to_add(is_pair)
        seq_length = compute_effective_axis_dimension(
            seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add
        )
        # Generate dummy inputs according to compute batch and sequence
        dummy_text = [[" ".join([processor.tokenizer.unk_token]) * seq_length]] * batch_size
        # Generate dummy bounding boxes
        dummy_bboxes = [[[48, 84, 73, 128]]] * batch_size
        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        # batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch)
        dummy_image = self._generate_dummy_images(batch_size, num_channels, image_height, image_width)
        inputs = dict(
            processor(
                dummy_image, text=dummy_text, boxes=dummy_bboxes, return_tensors=framework,
            )
        )
        return inputs
| 274
| 1
|
"""simple docstring"""
from typing import List, Optional, Tuple, Union
import PIL
import torch
from torchvision import transforms
from diffusers.pipeline_utils import DiffusionPipeline, ImagePipelineOutput
from diffusers.schedulers import DDIMScheduler
from diffusers.utils import randn_tensor
trans = transforms.Compose(
[
transforms.Resize((256, 256)),
transforms.ToTensor(),
transforms.Normalize([0.5], [0.5]),
]
)
def preprocess(image):
    if isinstance(image, torch.Tensor):
        return image
    elif isinstance(image, PIL.Image.Image):
        image = [image]
    image = [trans(img.convert("RGB")) for img in image]
    image = torch.stack(image)
    return image
class DDIMNoiseComparativeAnalysisPipeline(DiffusionPipeline):
    def __init__(self, unet, scheduler):
        super().__init__()
        # make sure scheduler can always be converted to DDIM
        scheduler = DDIMScheduler.from_config(scheduler.config)
        self.register_modules(unet=unet, scheduler=scheduler)

    def check_inputs(self, strength):
        if strength < 0 or strength > 1:
            raise ValueError(f"The value of strength should in [0.0, 1.0] but is {strength}")

    def get_timesteps(self, num_inference_steps, strength, device):
        # get the original timestep using init_timestep
        init_timestep = min(int(num_inference_steps * strength), num_inference_steps)
        t_start = max(num_inference_steps - init_timestep, 0)
        timesteps = self.scheduler.timesteps[t_start:]
        return timesteps, num_inference_steps - t_start

    def prepare_latents(self, image, timestep, batch_size, dtype, device, generator=None):
        if not isinstance(image, (torch.Tensor, PIL.Image.Image, list)):
            raise ValueError(
                f"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(image)}"
            )
        init_latents = image.to(device=device, dtype=dtype)
        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                f" size of {batch_size}. Make sure the batch size matches the length of the generators."
            )
        shape = init_latents.shape
        noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        # get latents
        print("add noise to latents at timestep", timestep)
        init_latents = self.scheduler.add_noise(init_latents, noise, timestep)
        latents = init_latents
        return latents

    @torch.no_grad()
    def __call__(self, image=None, strength=0.8, batch_size=1, generator=None, eta=0.0, num_inference_steps=50, use_clipped_model_output=None, output_type="pil", return_dict=True):
        self.check_inputs(strength)
        # 2. Preprocess image
        image = preprocess(image)
        # 3. set timesteps
        self.scheduler.set_timesteps(num_inference_steps, device=self.device)
        timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, strength, self.device)
        latent_timestep = timesteps[:1].repeat(batch_size)
        # 4. Prepare latent variables
        latents = self.prepare_latents(image, latent_timestep, batch_size, self.unet.dtype, self.device, generator)
        image = latents
        # 5. Denoising loop
        for t in self.progress_bar(timesteps):
            # 1. predict noise model_output
            model_output = self.unet(image, t).sample
            # 2. predict previous mean of image x_t-1 and add variance depending on eta
            # eta corresponds to η in paper and should be between [0, 1]
            # do x_t -> x_t-1
            image = self.scheduler.step(
                model_output, t, image, eta=eta, use_clipped_model_output=use_clipped_model_output, generator=generator,
            ).prev_sample
        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)
        if not return_dict:
            return (image, latent_timestep.item())
        return ImagePipelineOutput(images=image)
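# Rough usage sketch (illustrative; the variable names are hypothetical and no
# specific checkpoint is implied by the original file):
# pipe = DDIMNoiseComparativeAnalysisPipeline(unet=unet, scheduler=scheduler)
# result = pipe(image=pil_image, strength=0.6, num_inference_steps=50)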
| 357
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
is_vision_available,
)
lowercase__ :str = {"configuration_vit": ["VIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "ViTConfig", "ViTOnnxConfig"]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase__ :str = ["ViTFeatureExtractor"]
lowercase__ :int = ["ViTImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_vit"] = [
"VIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"ViTForImageClassification",
"ViTForMaskedImageModeling",
"ViTModel",
"ViTPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_vit"] = [
"TFViTForImageClassification",
"TFViTModel",
"TFViTPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_vit"] = [
"FlaxViTForImageClassification",
"FlaxViTModel",
"FlaxViTPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_vit import VIT_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTConfig, ViTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_vit import ViTFeatureExtractor
from .image_processing_vit import ViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit import (
VIT_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTForImageClassification,
ViTForMaskedImageModeling,
ViTModel,
ViTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vit import TFViTForImageClassification, TFViTModel, TFViTPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel, FlaxViTPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 97
| 0
|
# A Bipartite Graph is a graph whose vertices can be divided into two independent sets,
# U and V such that every edge (u, v) either connects a vertex from U to V or a vertex
# from V to U. In other words, for every edge (u, v), either u belongs to U and v to V,
# or u belongs to V and v to U. We can also say that there is no edge that connects
# vertices of same set.
def check_bipartite_dfs(graph) -> bool:
    visited = [False] * len(graph)
    color = [-1] * len(graph)

    def dfs(v, c):
        visited[v] = True
        color[v] = c
        for u in graph[v]:
            if not visited[u]:
                dfs(u, 1 - c)

    for i in range(len(graph)):
        if not visited[i]:
            dfs(i, 0)
    # verify that no edge connects two vertices of the same color
    for i in range(len(graph)):
        for j in graph[i]:
            if color[i] == color[j]:
                return False
    return True


# Adjacency list of graph
graph = {0: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2], 4: []}
print(check_bipartite_dfs(graph))
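# Counter-example (illustrative, not from the original file): a triangle is an
# odd cycle, so it cannot be two-colored and the check returns False.
assert check_bipartite_dfs({0: [1, 2], 1: [0, 2], 2: [0, 1]}) is False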
| 273
|
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv('''TEST_SAGEMAKER''' , '''False''' ) ) is not True , reason='''Skipping test because should only be run when releasing minor transformers version''' , )
@pytest.mark.usefixtures('''sm_env''' )
@parameterized_class(
[
{
'''framework''': '''pytorch''',
'''script''': '''run_glue.py''',
'''model_name_or_path''': '''distilbert-base-cased''',
'''instance_type''': '''ml.g4dn.xlarge''',
'''results''': {'''train_runtime''': 6_5_0, '''eval_accuracy''': 0.6, '''eval_loss''': 0.9},
},
{
'''framework''': '''tensorflow''',
'''script''': '''run_tf.py''',
'''model_name_or_path''': '''distilbert-base-cased''',
'''instance_type''': '''ml.g4dn.xlarge''',
'''results''': {'''train_runtime''': 6_0_0, '''eval_accuracy''': 0.3, '''eval_loss''': 0.9},
},
] )
class SingleNodeTest(unittest.TestCase):
    def setUp(self):
        if self.framework == "pytorch":
            subprocess.run(
                f"cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py".split(), encoding="utf-8", check=True,
            )
        assert hasattr(self, "env")

    def create_estimator(self, instance_count=1):
        return HuggingFace(
            entry_point=self.script, source_dir=self.env.test_path, role=self.env.role, image_uri=self.env.image_uri, base_job_name=f"{self.env.base_job_name}-single", instance_count=instance_count, instance_type=self.instance_type, debugger_hook_config=False, hyperparameters={**self.env.hyperparameters, "model_name_or_path": self.model_name_or_path}, metric_definitions=self.env.metric_definitions, py_version="py36",
        )

    def save_results_as_csv(self, job_name):
        TrainingJobAnalytics(job_name).export_csv(f"{self.env.test_path}/{job_name}_metrics.csv")

    def test_glue(self):
        estimator = self.create_estimator()
        # run training
        estimator.fit()
        # result dataframe
        result_metrics_df = TrainingJobAnalytics(estimator.latest_training_job.name).dataframe()
        # extract kpis
        eval_accuracy = list(result_metrics_df[result_metrics_df.metric_name == "eval_accuracy"]["value"])
        eval_loss = list(result_metrics_df[result_metrics_df.metric_name == "eval_loss"]["value"])
        # get train time from SageMaker job, this includes starting, preprocessing, stopping
        train_runtime = (
            Session().describe_training_job(estimator.latest_training_job.name).get("TrainingTimeInSeconds", 999999)
        )
        # assert kpis
        assert train_runtime <= self.results["train_runtime"]
        assert all(t >= self.results["eval_accuracy"] for t in eval_accuracy)
        assert all(t <= self.results["eval_loss"] for t in eval_loss)
        # dump tests result into json file to share in PR
        with open(f"{estimator.latest_training_job.name}.json", "w") as outfile:
            json.dump({"train_time": train_runtime, "eval_accuracy": eval_accuracy, "eval_loss": eval_loss}, outfile)
| 273
| 1
|
'''simple docstring'''
from statistics import mean
import numpy as np
def calculate_turn_around_time(process_name: list, arrival_time: list, burst_time: list, no_of_process: int) -> list:
    current_time = 0
    # Number of processes finished
    finished_process_count = 0
    # Displays the finished process.
    # If it is 0, the performance is completed if it is 1, before the performance.
    finished_process = [0] * no_of_process
    # List to include calculation results
    turn_around_time = [0] * no_of_process
    # Sort by arrival time.
    burst_time = [burst_time[i] for i in np.argsort(arrival_time)]
    process_name = [process_name[i] for i in np.argsort(arrival_time)]
    arrival_time.sort()
    while no_of_process > finished_process_count:
        i = 0
        while finished_process[i] == 1:
            i += 1
        if current_time < arrival_time[i]:
            current_time = arrival_time[i]
        response_ratio = 0
        # Index showing the location of the process being performed
        loc = 0
        # Saves the current response ratio.
        temp = 0
        for i in range(0, no_of_process):
            if finished_process[i] == 0 and arrival_time[i] <= current_time:
                temp = (burst_time[i] + (current_time - arrival_time[i])) / burst_time[
                    i
                ]
                if response_ratio < temp:
                    response_ratio = temp
                    loc = i
        # Calculate the turn around time
        turn_around_time[loc] = current_time + burst_time[loc] - arrival_time[loc]
        current_time += burst_time[loc]
        # Indicates that the process has been performed.
        finished_process[loc] = 1
        # Increase finished_process_count by 1
        finished_process_count += 1
    return turn_around_time


def calculate_waiting_time(process_name: list, turn_around_time: list, burst_time: list, no_of_process: int) -> list:
    waiting_time = [0] * no_of_process
    for i in range(0, no_of_process):
        waiting_time[i] = turn_around_time[i] - burst_time[i]
    return waiting_time


if __name__ == "__main__":
    no_of_process = 5
    process_name = ["A", "B", "C", "D", "E"]
    arrival_time = [1, 2, 3, 4, 5]
    burst_time = [1, 2, 3, 4, 5]
    turn_around_time = calculate_turn_around_time(
        process_name, arrival_time, burst_time, no_of_process
    )
    waiting_time = calculate_waiting_time(
        process_name, turn_around_time, burst_time, no_of_process
    )
    print("Process name \tArrival time \tBurst time \tTurn around time \tWaiting time")
    for i in range(0, no_of_process):
        print(
            f"{process_name[i]}\t\t{arrival_time[i]}\t\t{burst_time[i]}\t\t"
            f"{turn_around_time[i]}\t\t\t{waiting_time[i]}"
        )
    print(f"average waiting time : {mean(waiting_time):.5f}")
    print(f"average turn around time : {mean(turn_around_time):.5f}")
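# Worked example (illustrative, not from the original file): HRRN always picks the
# ready process with the highest response ratio (waiting + burst) / burst. A job
# that has waited 6 units with a 3-unit burst scores (6 + 3) / 3 = 3.0 and beats a
# freshly arrived 3-unit job whose ratio is (0 + 3) / 3 = 1.0.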
| 366
|
'''simple docstring'''
import re
def is_sri_lankan_phone_number(phone: str) -> bool:
    """Determine whether the given string is a valid Sri Lankan mobile phone number."""
    pattern = re.compile(r"^(?:0|94|\+94|0{2}94)" r"7(0|1|2|4|5|6|7|8)" r"(-| |)" r"\d{7}$")
    return bool(re.search(pattern, phone))


if __name__ == "__main__":
    phone = "0094702343221"
    print(is_sri_lankan_phone_number(phone))
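    # A few illustrative checks (hand-picked, not from the original):
    print(is_sri_lankan_phone_number("+94773283048"))  # True: mobile prefix 77, 7 trailing digits
    print(is_sri_lankan_phone_number("94711234567"))   # True: prefix 71, 7 trailing digits
    print(is_sri_lankan_phone_number("0112345678"))    # False: landline prefix, no leading 7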
| 8
| 0
|
demo_graph = {
    "A": ["B", "C", "E"],
    "B": ["A", "D", "E"],
    "C": ["A", "F", "G"],
    "D": ["B"],
    "E": ["A", "B", "D"],
    "F": ["C"],
    "G": ["C"],
}


def bfs_shortest_path(graph: dict, start, goal) -> list:
    """Find the shortest path between `start` and `goal` nodes using BFS."""
    explored = set()
    # keep track of all the paths to be checked
    queue = [[start]]

    # return path if start is goal
    if start == goal:
        return [start]

    # keeps looping until all possible paths have been checked
    while queue:
        # pop the first path from the queue
        path = queue.pop(0)
        # get the last node from the path
        node = path[-1]
        if node not in explored:
            neighbours = graph[node]
            # go through all neighbour nodes, construct a new path and
            # push it into the queue
            for neighbour in neighbours:
                new_path = list(path)
                new_path.append(neighbour)
                queue.append(new_path)
                # return path if neighbour is goal
                if neighbour == goal:
                    return new_path

            # mark node as explored
            explored.add(node)

    # in case there's no path between the 2 nodes
    return []


def bfs_shortest_path_distance(graph: dict, start, target) -> int:
    """Find the shortest path distance between `start` and `target` nodes using BFS."""
    if not graph or start not in graph or target not in graph:
        return -1
    if start == target:
        return 0
    queue = [start]
    visited = {start}
    # Keep tab on distances from `start` node.
    dist = {start: 0, target: -1}
    while queue:
        node = queue.pop(0)
        if node == target:
            dist[target] = (
                dist[node] if dist[target] == -1 else min(dist[target], dist[node])
            )
        for adjacent in graph[node]:
            if adjacent not in visited:
                visited.add(adjacent)
                queue.append(adjacent)
                dist[adjacent] = dist[node] + 1
    return dist[target]
if __name__ == "__main__":
print(bfs_shortest_path(demo_graph, "G", "D")) # returns ['G', 'C', 'A', 'B', 'D']
print(bfs_shortest_path_distance(demo_graph, "G", "D")) # returns 4
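# Performance note (an aside, not in the original): list.pop(0) is O(n), so each
# dequeue scans the remaining queue. A drop-in improvement with identical logic
# is collections.deque, e.g.:
#
#     from collections import deque
#     queue = deque([[start]])
#     path = queue.popleft()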
| 101
|
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class DPMSolverSDEScheduler(metaclass=DummyObject):
    _backends = ["torch", "torchsde"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "torchsde"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "torchsde"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "torchsde"])
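# Pattern note (a sketch of the intended behaviour, assuming diffusers' dummy
# object machinery): with `torch`/`torchsde` missing, instantiating the class or
# calling `from_config`/`from_pretrained` raises an ImportError that names the
# missing backends, rather than failing with an opaque error at import time.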
| 173
| 0
|
import copy
import os
from typing import Union

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

ALIGN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "kakaobrain/align-base": "https://huggingface.co/kakaobrain/align-base/resolve/main/config.json",
}


class AlignTextConfig(PretrainedConfig):
    model_type = "align_text_model"

    def __init__(self, vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=0, position_embedding_type="absolute", use_cache=True, **kwargs):
        super().__init__(**kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.pad_token_id = pad_token_id

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the text config dict if we are loading from AlignConfig
        if config_dict.get("model_type") == "align":
            config_dict = config_dict["text_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)


class AlignVisionConfig(PretrainedConfig):
    model_type = "align_vision_model"

    def __init__(self, num_channels: int = 3, image_size: int = 600, width_coefficient: float = 2.0, depth_coefficient: float = 3.1, depth_divisor: int = 8, kernel_sizes=[3, 3, 5, 3, 5, 5, 3], in_channels=[32, 16, 24, 40, 80, 112, 192], out_channels=[16, 24, 40, 80, 112, 192, 320], depthwise_padding=[], strides=[1, 2, 2, 2, 1, 2, 1], num_block_repeats=[1, 2, 2, 3, 3, 4, 1], expand_ratios=[1, 6, 6, 6, 6, 6, 6], squeeze_expansion_ratio: float = 0.25, hidden_act: str = "swish", hidden_dim: int = 2560, pooling_type: str = "mean", initializer_range: float = 0.02, batch_norm_eps: float = 0.001, batch_norm_momentum: float = 0.99, drop_connect_rate: float = 0.2, **kwargs):
        super().__init__(**kwargs)

        self.num_channels = num_channels
        self.image_size = image_size
        self.width_coefficient = width_coefficient
        self.depth_coefficient = depth_coefficient
        self.depth_divisor = depth_divisor
        self.kernel_sizes = kernel_sizes
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.depthwise_padding = depthwise_padding
        self.strides = strides
        self.num_block_repeats = num_block_repeats
        self.expand_ratios = expand_ratios
        self.squeeze_expansion_ratio = squeeze_expansion_ratio
        self.hidden_act = hidden_act
        self.hidden_dim = hidden_dim
        self.pooling_type = pooling_type
        self.initializer_range = initializer_range
        self.batch_norm_eps = batch_norm_eps
        self.batch_norm_momentum = batch_norm_momentum
        self.drop_connect_rate = drop_connect_rate
        self.num_hidden_layers = sum(num_block_repeats) * 4

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from AlignConfig
        if config_dict.get("model_type") == "align":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)


class AlignConfig(PretrainedConfig):
    model_type = "align"
    is_composition = True

    def __init__(self, text_config=None, vision_config=None, projection_dim=640, temperature_init_value=1.0, initializer_range=0.02, **kwargs):
        super().__init__(**kwargs)

        if text_config is None:
            text_config = {}
            logger.info("text_config is None. Initializing the AlignTextConfig with default values.")

        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. Initializing the AlignVisionConfig with default values.")

        self.text_config = AlignTextConfig(**text_config)
        self.vision_config = AlignVisionConfig(**vision_config)

        self.projection_dim = projection_dim
        self.temperature_init_value = temperature_init_value
        self.initializer_range = initializer_range

    @classmethod
    def from_text_vision_configs(cls, text_config: AlignTextConfig, vision_config: AlignVisionConfig, **kwargs):
        return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["text_config"] = self.text_config.to_dict()
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
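# Minimal usage sketch (assuming the package layout above; values are the
# defaults defined in this file):
#
#     config = AlignConfig()
#     config.text_config.vocab_size    # 30522
#     config.vision_config.hidden_dim  # 2560
#     config = AlignConfig.from_text_vision_configs(AlignTextConfig(), AlignVisionConfig())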
| 193
|
from PIL import Image
def change_brightness(img: Image, level: float) -> Image:
    """Change the brightness of a PIL Image to a given level."""

    def brightness(c: int) -> float:
        """Fundamental transformation/operation that'll be performed on every pixel value."""
        return 128 + level + (c - 128)

    if not -255.0 <= level <= 255.0:
        raise ValueError("level must be between -255.0 (black) and 255.0 (white)")

    return img.point(brightness)


if __name__ == "__main__":
    # Load image
    with Image.open("image_data/lena.jpg") as img:
        # Change brightness to 100
        brigt_img = change_brightness(img, 100)
        brigt_img.save("image_data/lena_brightness.png", format="png")
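        # Caveat (an aside, not in the original): for 8-bit images, Image.point
        # builds a lookup table over 0..255 and clamps the mapped values to that
        # range, so large |level| values saturate at pure black or pure white.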
| 193
| 1
|
"""simple docstring"""
from __future__ import annotations
import math
def prime_sieve(num: int) -> list[int]:
    """Return all prime numbers up to num using the sieve of Eratosthenes."""
    if num <= 0:
        message = f"{num}: Invalid input, please enter a positive integer."
        raise ValueError(message)

    sieve = [True] * (num + 1)
    prime = []
    start = 2
    end = int(math.sqrt(num))

    while start <= end:
        # If start is a prime
        if sieve[start] is True:
            prime.append(start)

            # Set multiples of start be False
            for i in range(start * start, num + 1, start):
                if sieve[i] is True:
                    sieve[i] = False
        start += 1

    for j in range(end + 1, num + 1):
        if sieve[j] is True:
            prime.append(j)

    return prime
if __name__ == "__main__":
print(prime_sieve(int(input('Enter a positive integer: ').strip())))
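    # Example (hand-checked): prime_sieve(10) returns [2, 3, 5, 7]. The while
    # loop only needs to mark multiples up to sqrt(num); the final loop then
    # collects the remaining unmarked numbers above sqrt(num).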
| 44
|
"""simple docstring"""
import warnings
from ...utils import is_sklearn_available, requires_backends
if is_sklearn_available():
from scipy.stats import pearsonr, spearmanr
    from sklearn.metrics import f1_score, matthews_corrcoef


DEPRECATION_WARNING = (
    "This metric will be removed from the library soon, metrics should be handled with the 🤗 Evaluate "
    "library. You can have a look at this example script for pointers: "
    "https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py"
)


def simple_accuracy(preds, labels):
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(simple_accuracy, "sklearn")
    return (preds == labels).mean()


def acc_and_f1(preds, labels):
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(acc_and_f1, "sklearn")
    acc = simple_accuracy(preds, labels)
    f1 = f1_score(y_true=labels, y_pred=preds)
    return {
        "acc": acc,
        "f1": f1,
        "acc_and_f1": (acc + f1) / 2,
    }


def pearson_and_spearman(preds, labels):
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(pearson_and_spearman, "sklearn")
    pearson_corr = pearsonr(preds, labels)[0]
    spearman_corr = spearmanr(preds, labels)[0]
    return {
        "pearson": pearson_corr,
        "spearmanr": spearman_corr,
        "corr": (pearson_corr + spearman_corr) / 2,
    }


def glue_compute_metrics(task_name, preds, labels):
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(glue_compute_metrics, "sklearn")
    assert len(preds) == len(labels), f"Predictions and labels have mismatched lengths {len(preds)} and {len(labels)}"
    if task_name == "cola":
        return {"mcc": matthews_corrcoef(labels, preds)}
    elif task_name == "sst-2":
        return {"acc": simple_accuracy(preds, labels)}
    elif task_name == "mrpc":
        return acc_and_f1(preds, labels)
    elif task_name == "sts-b":
        return pearson_and_spearman(preds, labels)
    elif task_name == "qqp":
        return acc_and_f1(preds, labels)
    elif task_name == "mnli":
        return {"mnli/acc": simple_accuracy(preds, labels)}
    elif task_name == "mnli-mm":
        return {"mnli-mm/acc": simple_accuracy(preds, labels)}
    elif task_name == "qnli":
        return {"acc": simple_accuracy(preds, labels)}
    elif task_name == "rte":
        return {"acc": simple_accuracy(preds, labels)}
    elif task_name == "wnli":
        return {"acc": simple_accuracy(preds, labels)}
    elif task_name == "hans":
        return {"acc": simple_accuracy(preds, labels)}
    else:
        raise KeyError(task_name)


def xnli_compute_metrics(task_name, preds, labels):
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(xnli_compute_metrics, "sklearn")
    if len(preds) != len(labels):
        raise ValueError(f"Predictions and labels have mismatched lengths {len(preds)} and {len(labels)}")
    if task_name == "xnli":
        return {"acc": simple_accuracy(preds, labels)}
    else:
        raise KeyError(task_name)
| 54
| 0
|
'''simple docstring'''
UNIT_SYMBOL = {
    "meter": "m",
    "kilometer": "km",
    "megametre": "Mm",
    "gigametre": "Gm",
    "terametre": "Tm",
    "petametre": "Pm",
    "exametre": "Em",
    "zettametre": "Zm",
    "yottametre": "Ym",
}
# Exponent of the factor (meter)
METRIC_CONVERSION = {
    "m": 0,
    "km": 3,
    "Mm": 6,
    "Gm": 9,
    "Tm": 12,
    "Pm": 15,
    "Em": 18,
    "Zm": 21,
    "Ym": 24,
}


def length_conversion(value: float, from_type: str, to_type: str) -> float:
    """Conversion between astronomical length units."""
    from_sanitized = from_type.lower().strip("s")
    to_sanitized = to_type.lower().strip("s")

    from_sanitized = UNIT_SYMBOL.get(from_sanitized, from_sanitized)
    to_sanitized = UNIT_SYMBOL.get(to_sanitized, to_sanitized)

    if from_sanitized not in METRIC_CONVERSION:
        msg = (
            f"Invalid 'from_type' value: {from_type!r}.\n"
            f"Conversion abbreviations are: {', '.join(METRIC_CONVERSION)}"
        )
        raise ValueError(msg)
    if to_sanitized not in METRIC_CONVERSION:
        msg = (
            f"Invalid 'to_type' value: {to_type!r}.\n"
            f"Conversion abbreviations are: {', '.join(METRIC_CONVERSION)}"
        )
        raise ValueError(msg)
    from_exponent = METRIC_CONVERSION[from_sanitized]
    to_exponent = METRIC_CONVERSION[to_sanitized]
    exponent = 1

    if from_exponent > to_exponent:
        exponent = from_exponent - to_exponent
    else:
        exponent = -(to_exponent - from_exponent)

    return value * pow(10, exponent)
if __name__ == "__main__":
from doctest import testmod
testmod()
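    # Worked example (hand-computed, not part of the original doctests): the
    # exponents for m and km are 0 and 3, so the factor is 10 ** -(3 - 0).
    print(length_conversion(1, "meter", "kilometer"))    # 0.001
    print(length_conversion(1, "kilometers", "meters"))  # 1000.0 (plural "s" is stripped)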
| 350
|
'''simple docstring'''
from timeit import timeit
def get_set_bits_count_using_brian_kernighans_algorithm(number: int) -> int:
    """Count the number of set bits in an integer using Brian Kernighan's algorithm."""
    if number < 0:
        raise ValueError("the value of input must not be negative")
    result = 0
    while number:
        number &= number - 1
        result += 1
    return result


def get_set_bits_count_using_modulo_operator(number: int) -> int:
    """Count the number of set bits in an integer using the modulo operator."""
    if number < 0:
        raise ValueError("the value of input must not be negative")
    result = 0
    while number:
        if number % 2 == 1:
            result += 1
        number >>= 1
    return result


def benchmark() -> None:
    """Benchmark code comparing the two functions, with different int values."""

    def do_benchmark(number: int) -> None:
        setup = "import __main__ as z"
        print(f"Benchmark when {number = }:")
        print(f"{get_set_bits_count_using_modulo_operator(number) = }")
        timing = timeit(f"z.get_set_bits_count_using_modulo_operator({number})", setup=setup)
        print(f"timeit() runs in {timing} seconds")
        print(f"{get_set_bits_count_using_brian_kernighans_algorithm(number) = }")
        timing = timeit(
            f"z.get_set_bits_count_using_brian_kernighans_algorithm({number})", setup=setup,
        )
        print(f"timeit() runs in {timing} seconds")

    for number in (25, 37, 58, 0):
        do_benchmark(number)
        print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
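    # Quick check (hand-computed): 25 == 0b11001 has three set bits, so both
    # implementations agree. Kernighan's trick clears the lowest set bit each
    # pass, looping once per set bit instead of once per bit position.
    assert get_set_bits_count_using_brian_kernighans_algorithm(25) == 3
    assert get_set_bits_count_using_modulo_operator(25) == 3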
| 104
| 0
|
'''simple docstring'''
import argparse
import json
import torch
from diffusers import DDPMScheduler, LDMPipeline, UNet2DModel, VQModel


def shave_segments(path, n_shave_prefix_segments=1):
    """Removes segments. Positive values shave the first segments, negative shave the last segments."""
    if n_shave_prefix_segments >= 0:
        return ".".join(path.split(".")[n_shave_prefix_segments:])
    else:
        return ".".join(path.split(".")[:n_shave_prefix_segments])
def renew_resnet_paths(old_list, n_shave_prefix_segments=0):
    """Updates paths inside resnets to the new naming scheme (local renaming)."""
    mapping = []
    for old_item in old_list:
        new_item = old_item.replace("in_layers.0", "norm1")
        new_item = new_item.replace("in_layers.2", "conv1")
        new_item = new_item.replace("out_layers.0", "norm2")
        new_item = new_item.replace("out_layers.3", "conv2")
        new_item = new_item.replace("emb_layers.1", "time_emb_proj")
        new_item = new_item.replace("skip_connection", "conv_shortcut")
        new_item = shave_segments(new_item, n_shave_prefix_segments=n_shave_prefix_segments)
        mapping.append({"old": old_item, "new": new_item})
    return mapping
def renew_attention_paths(old_list, n_shave_prefix_segments=0):
    """Updates paths inside attentions to the new naming scheme (local renaming)."""
    mapping = []
    for old_item in old_list:
        new_item = old_item
        new_item = new_item.replace("norm.weight", "group_norm.weight")
        new_item = new_item.replace("norm.bias", "group_norm.bias")
        new_item = new_item.replace("proj_out.weight", "proj_attn.weight")
        new_item = new_item.replace("proj_out.bias", "proj_attn.bias")
        new_item = shave_segments(new_item, n_shave_prefix_segments=n_shave_prefix_segments)
        mapping.append({"old": old_item, "new": new_item})
    return mapping
def assign_to_checkpoint(paths, checkpoint, old_checkpoint, attention_paths_to_split=None, additional_replacements=None, config=None):
    assert isinstance(paths, list), "Paths should be a list of dicts containing 'old' and 'new' keys."

    # Splits the attention layers into three variables.
    if attention_paths_to_split is not None:
        for path, path_map in attention_paths_to_split.items():
            old_tensor = old_checkpoint[path]
            channels = old_tensor.shape[0] // 3
            target_shape = (-1, channels) if len(old_tensor.shape) == 3 else (-1)
            num_heads = old_tensor.shape[0] // config["num_head_channels"] // 3
            old_tensor = old_tensor.reshape((num_heads, 3 * channels // num_heads) + old_tensor.shape[1:])
            query, key, value = old_tensor.split(channels // num_heads, dim=1)

            checkpoint[path_map["query"]] = query.reshape(target_shape)
            checkpoint[path_map["key"]] = key.reshape(target_shape)
            checkpoint[path_map["value"]] = value.reshape(target_shape)

    for path in paths:
        new_path = path["new"]

        # These have already been assigned
        if attention_paths_to_split is not None and new_path in attention_paths_to_split:
            continue

        # Global renaming happens here
        new_path = new_path.replace("middle_block.0", "mid_block.resnets.0")
        new_path = new_path.replace("middle_block.1", "mid_block.attentions.0")
        new_path = new_path.replace("middle_block.2", "mid_block.resnets.1")

        if additional_replacements is not None:
            for replacement in additional_replacements:
                new_path = new_path.replace(replacement["old"], replacement["new"])

        # proj_attn.weight has to be converted from conv 1D to linear
        if "proj_attn.weight" in new_path:
            checkpoint[new_path] = old_checkpoint[path["old"]][:, :, 0]
        else:
            checkpoint[new_path] = old_checkpoint[path["old"]]
def convert_ldm_checkpoint(checkpoint, config):
    """Takes an LDM state dict and a config, and returns a converted diffusers checkpoint."""
    new_checkpoint = {}

    new_checkpoint["time_embedding.linear_1.weight"] = checkpoint["time_embed.0.weight"]
    new_checkpoint["time_embedding.linear_1.bias"] = checkpoint["time_embed.0.bias"]
    new_checkpoint["time_embedding.linear_2.weight"] = checkpoint["time_embed.2.weight"]
    new_checkpoint["time_embedding.linear_2.bias"] = checkpoint["time_embed.2.bias"]

    new_checkpoint["conv_in.weight"] = checkpoint["input_blocks.0.0.weight"]
    new_checkpoint["conv_in.bias"] = checkpoint["input_blocks.0.0.bias"]

    new_checkpoint["conv_norm_out.weight"] = checkpoint["out.0.weight"]
    new_checkpoint["conv_norm_out.bias"] = checkpoint["out.0.bias"]
    new_checkpoint["conv_out.weight"] = checkpoint["out.2.weight"]
    new_checkpoint["conv_out.bias"] = checkpoint["out.2.bias"]

    # Retrieves the keys for the input blocks only
    num_input_blocks = len({".".join(layer.split(".")[:2]) for layer in checkpoint if "input_blocks" in layer})
    input_blocks = {
        layer_id: [key for key in checkpoint if f"input_blocks.{layer_id}" in key]
        for layer_id in range(num_input_blocks)
    }

    # Retrieves the keys for the middle blocks only
    num_middle_blocks = len({".".join(layer.split(".")[:2]) for layer in checkpoint if "middle_block" in layer})
    middle_blocks = {
        layer_id: [key for key in checkpoint if f"middle_block.{layer_id}" in key]
        for layer_id in range(num_middle_blocks)
    }

    # Retrieves the keys for the output blocks only
    num_output_blocks = len({".".join(layer.split(".")[:2]) for layer in checkpoint if "output_blocks" in layer})
    output_blocks = {
        layer_id: [key for key in checkpoint if f"output_blocks.{layer_id}" in key]
        for layer_id in range(num_output_blocks)
    }

    for i in range(1, num_input_blocks):
        block_id = (i - 1) // (config["num_res_blocks"] + 1)
        layer_in_block_id = (i - 1) % (config["num_res_blocks"] + 1)

        resnets = [key for key in input_blocks[i] if f"input_blocks.{i}.0" in key]
        attentions = [key for key in input_blocks[i] if f"input_blocks.{i}.1" in key]

        if f"input_blocks.{i}.0.op.weight" in checkpoint:
            new_checkpoint[f"down_blocks.{block_id}.downsamplers.0.conv.weight"] = checkpoint[
                f"input_blocks.{i}.0.op.weight"
            ]
            new_checkpoint[f"down_blocks.{block_id}.downsamplers.0.conv.bias"] = checkpoint[
                f"input_blocks.{i}.0.op.bias"
            ]
            continue

        paths = renew_resnet_paths(resnets)
        meta_path = {"old": f"input_blocks.{i}.0", "new": f"down_blocks.{block_id}.resnets.{layer_in_block_id}"}
        resnet_op = {"old": "resnets.2.op", "new": "downsamplers.0.op"}
        assign_to_checkpoint(
            paths, new_checkpoint, checkpoint, additional_replacements=[meta_path, resnet_op], config=config
        )

        if len(attentions):
            paths = renew_attention_paths(attentions)
            meta_path = {
                "old": f"input_blocks.{i}.1",
                "new": f"down_blocks.{block_id}.attentions.{layer_in_block_id}",
            }
            to_split = {
                f"input_blocks.{i}.1.qkv.bias": {
                    "key": f"down_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias",
                    "query": f"down_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias",
                    "value": f"down_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias",
                },
                f"input_blocks.{i}.1.qkv.weight": {
                    "key": f"down_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight",
                    "query": f"down_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight",
                    "value": f"down_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight",
                },
            }
            assign_to_checkpoint(
                paths, new_checkpoint, checkpoint, additional_replacements=[meta_path], attention_paths_to_split=to_split, config=config,
            )

    resnet_0 = middle_blocks[0]
    attentions = middle_blocks[1]
    resnet_1 = middle_blocks[2]

    resnet_0_paths = renew_resnet_paths(resnet_0)
    assign_to_checkpoint(resnet_0_paths, new_checkpoint, checkpoint, config=config)

    resnet_1_paths = renew_resnet_paths(resnet_1)
    assign_to_checkpoint(resnet_1_paths, new_checkpoint, checkpoint, config=config)

    attentions_paths = renew_attention_paths(attentions)
    to_split = {
        "middle_block.1.qkv.bias": {
            "key": "mid_block.attentions.0.key.bias",
            "query": "mid_block.attentions.0.query.bias",
            "value": "mid_block.attentions.0.value.bias",
        },
        "middle_block.1.qkv.weight": {
            "key": "mid_block.attentions.0.key.weight",
            "query": "mid_block.attentions.0.query.weight",
            "value": "mid_block.attentions.0.value.weight",
        },
    }
    assign_to_checkpoint(
        attentions_paths, new_checkpoint, checkpoint, attention_paths_to_split=to_split, config=config
    )

    for i in range(num_output_blocks):
        block_id = i // (config["num_res_blocks"] + 1)
        layer_in_block_id = i % (config["num_res_blocks"] + 1)
        output_block_layers = [shave_segments(name, 2) for name in output_blocks[i]]
        output_block_list = {}

        for layer in output_block_layers:
            layer_id, layer_name = layer.split(".")[0], shave_segments(layer, 1)
            if layer_id in output_block_list:
                output_block_list[layer_id].append(layer_name)
            else:
                output_block_list[layer_id] = [layer_name]

        if len(output_block_list) > 1:
            resnets = [key for key in output_blocks[i] if f"output_blocks.{i}.0" in key]
            attentions = [key for key in output_blocks[i] if f"output_blocks.{i}.1" in key]

            resnet_0_paths = renew_resnet_paths(resnets)
            paths = renew_resnet_paths(resnets)

            meta_path = {"old": f"output_blocks.{i}.0", "new": f"up_blocks.{block_id}.resnets.{layer_in_block_id}"}
            assign_to_checkpoint(paths, new_checkpoint, checkpoint, additional_replacements=[meta_path], config=config)

            if ["conv.weight", "conv.bias"] in output_block_list.values():
                index = list(output_block_list.values()).index(["conv.weight", "conv.bias"])
                new_checkpoint[f"up_blocks.{block_id}.upsamplers.0.conv.weight"] = checkpoint[
                    f"output_blocks.{i}.{index}.conv.weight"
                ]
                new_checkpoint[f"up_blocks.{block_id}.upsamplers.0.conv.bias"] = checkpoint[
                    f"output_blocks.{i}.{index}.conv.bias"
                ]

                # Clear attentions as they have been attributed above.
                if len(attentions) == 2:
                    attentions = []

            if len(attentions):
                paths = renew_attention_paths(attentions)
                meta_path = {
                    "old": f"output_blocks.{i}.1",
                    "new": f"up_blocks.{block_id}.attentions.{layer_in_block_id}",
                }
                to_split = {
                    f"output_blocks.{i}.1.qkv.bias": {
                        "key": f"up_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias",
                        "query": f"up_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias",
                        "value": f"up_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias",
                    },
                    f"output_blocks.{i}.1.qkv.weight": {
                        "key": f"up_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight",
                        "query": f"up_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight",
                        "value": f"up_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight",
                    },
                }
                assign_to_checkpoint(
                    paths, new_checkpoint, checkpoint, additional_replacements=[meta_path], attention_paths_to_split=to_split if any("qkv" in key for key in attentions) else None, config=config,
                )
        else:
            resnet_0_paths = renew_resnet_paths(output_blocks[i], n_shave_prefix_segments=1)
            for path in resnet_0_paths:
                old_path = ".".join(["output_blocks", str(i), path["old"]])
                new_path = ".".join(["up_blocks", str(block_id), "resnets", str(layer_in_block_id), path["new"]])

                new_checkpoint[new_path] = checkpoint[old_path]

    return new_checkpoint
if __name__ == "__main__":
A__: Any = argparse.ArgumentParser()
parser.add_argument(
'''--checkpoint_path''', default=None, type=str, required=True, help='''Path to the checkpoint to convert.'''
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help='''The config json file corresponding to the architecture.''',
)
parser.add_argument('''--dump_path''', default=None, type=str, required=True, help='''Path to the output model.''')
A__: List[Any] = parser.parse_args()
A__: str = torch.load(args.checkpoint_path)
with open(args.config_file) as f:
A__: Union[str, Any] = json.loads(f.read())
A__: List[Any] = convert_ldm_checkpoint(checkpoint, config)
if "ldm" in config:
del config["ldm"]
A__: List[str] = UNetaDModel(**config)
model.load_state_dict(converted_checkpoint)
try:
A__: Optional[Any] = DDPMScheduler.from_config('''/'''.join(args.checkpoint_path.split('''/''')[:-1]))
A__: int = VQModel.from_pretrained('''/'''.join(args.checkpoint_path.split('''/''')[:-1]))
A__: int = LDMPipeline(unet=model, scheduler=scheduler, vae=vqvae)
pipe.save_pretrained(args.dump_path)
except: # noqa: E722
model.save_pretrained(args.dump_path)
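    # Example invocation (script and path names are placeholders, not from the original):
    #   python convert_ldm_original_checkpoint_to_diffusers.py \
    #       --checkpoint_path model.ckpt --config_file config.json --dump_path ./converted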
| 276
|
'''simple docstring'''
import re
from filelock import FileLock
try:
    import nltk

    NLTK_AVAILABLE = True
except (ImportError, ModuleNotFoundError):
    NLTK_AVAILABLE = False

if NLTK_AVAILABLE:
    with FileLock(".lock") as lock:
        nltk.download("punkt", quiet=True)
def add_newline_to_end_of_each_sentence(x: str) -> str:
    """Split the text into sentences and rejoin them with newlines."""
    x = re.sub("<n>", "", x)  # remove pegasus newline char
    assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
    return "\n".join(nltk.sent_tokenize(x))
| 97
| 0
|
"""simple docstring"""
import argparse
import os
import jax as jnp
import numpy as onp
import torch
import torch.nn as nn
from music_spectrogram_diffusion import inference
from t5x import checkpoints

from diffusers import DDPMScheduler, OnnxRuntimeModel, SpectrogramDiffusionPipeline
from diffusers.pipelines.spectrogram_diffusion import SpectrogramContEncoder, SpectrogramNotesEncoder, T5FilmDecoder


MODEL = "base_with_context"
def load_notes_encoder(weights, model):
    """Load the notes-encoder weights from a T5X checkpoint tree into the torch model."""
_UpperCAmelCase = nn.Parameter(torch.FloatTensor(weights["""token_embedder"""]["""embedding"""] ) )
_UpperCAmelCase = nn.Parameter(
torch.FloatTensor(weights["""Embed_0"""]["""embedding"""] ) ,requires_grad=_lowerCAmelCase )
for lyr_num, lyr in enumerate(model.encoders ):
_UpperCAmelCase = weights[f'''layers_{lyr_num}''']
_UpperCAmelCase = nn.Parameter(
torch.FloatTensor(ly_weight["""pre_attention_layer_norm"""]["""scale"""] ) )
_UpperCAmelCase = ly_weight["""attention"""]
_UpperCAmelCase = nn.Parameter(torch.FloatTensor(attention_weights["""query"""]["""kernel"""].T ) )
_UpperCAmelCase = nn.Parameter(torch.FloatTensor(attention_weights["""key"""]["""kernel"""].T ) )
_UpperCAmelCase = nn.Parameter(torch.FloatTensor(attention_weights["""value"""]["""kernel"""].T ) )
_UpperCAmelCase = nn.Parameter(torch.FloatTensor(attention_weights["""out"""]["""kernel"""].T ) )
_UpperCAmelCase = nn.Parameter(torch.FloatTensor(ly_weight["""pre_mlp_layer_norm"""]["""scale"""] ) )
_UpperCAmelCase = nn.Parameter(torch.FloatTensor(ly_weight["""mlp"""]["""wi_0"""]["""kernel"""].T ) )
_UpperCAmelCase = nn.Parameter(torch.FloatTensor(ly_weight["""mlp"""]["""wi_1"""]["""kernel"""].T ) )
_UpperCAmelCase = nn.Parameter(torch.FloatTensor(ly_weight["""mlp"""]["""wo"""]["""kernel"""].T ) )
_UpperCAmelCase = nn.Parameter(torch.FloatTensor(weights["""encoder_norm"""]["""scale"""] ) )
return model
def load_continuous_encoder(weights, model):
    """Load the continuous-encoder weights from a T5X checkpoint tree into the torch model."""
_UpperCAmelCase = nn.Parameter(torch.FloatTensor(weights["""input_proj"""]["""kernel"""].T ) )
_UpperCAmelCase = nn.Parameter(
torch.FloatTensor(weights["""Embed_0"""]["""embedding"""] ) ,requires_grad=_lowerCAmelCase )
for lyr_num, lyr in enumerate(model.encoders ):
_UpperCAmelCase = weights[f'''layers_{lyr_num}''']
_UpperCAmelCase = ly_weight["""attention"""]
_UpperCAmelCase = nn.Parameter(torch.FloatTensor(attention_weights["""query"""]["""kernel"""].T ) )
_UpperCAmelCase = nn.Parameter(torch.FloatTensor(attention_weights["""key"""]["""kernel"""].T ) )
_UpperCAmelCase = nn.Parameter(torch.FloatTensor(attention_weights["""value"""]["""kernel"""].T ) )
_UpperCAmelCase = nn.Parameter(torch.FloatTensor(attention_weights["""out"""]["""kernel"""].T ) )
_UpperCAmelCase = nn.Parameter(
torch.FloatTensor(ly_weight["""pre_attention_layer_norm"""]["""scale"""] ) )
_UpperCAmelCase = nn.Parameter(torch.FloatTensor(ly_weight["""mlp"""]["""wi_0"""]["""kernel"""].T ) )
_UpperCAmelCase = nn.Parameter(torch.FloatTensor(ly_weight["""mlp"""]["""wi_1"""]["""kernel"""].T ) )
_UpperCAmelCase = nn.Parameter(torch.FloatTensor(ly_weight["""mlp"""]["""wo"""]["""kernel"""].T ) )
_UpperCAmelCase = nn.Parameter(torch.FloatTensor(ly_weight["""pre_mlp_layer_norm"""]["""scale"""] ) )
_UpperCAmelCase = nn.Parameter(torch.FloatTensor(weights["""encoder_norm"""]["""scale"""] ) )
return model
def load_decoder(weights, model):
    """Load the decoder weights from a T5X checkpoint tree into the torch model."""
_UpperCAmelCase = nn.Parameter(torch.FloatTensor(weights["""time_emb_dense0"""]["""kernel"""].T ) )
_UpperCAmelCase = nn.Parameter(torch.FloatTensor(weights["""time_emb_dense1"""]["""kernel"""].T ) )
_UpperCAmelCase = nn.Parameter(
torch.FloatTensor(weights["""Embed_0"""]["""embedding"""] ) ,requires_grad=_lowerCAmelCase )
_UpperCAmelCase = nn.Parameter(
torch.FloatTensor(weights["""continuous_inputs_projection"""]["""kernel"""].T ) )
for lyr_num, lyr in enumerate(model.decoders ):
_UpperCAmelCase = weights[f'''layers_{lyr_num}''']
_UpperCAmelCase = nn.Parameter(
torch.FloatTensor(ly_weight["""pre_self_attention_layer_norm"""]["""scale"""] ) )
_UpperCAmelCase = nn.Parameter(
torch.FloatTensor(ly_weight["""FiLMLayer_0"""]["""DenseGeneral_0"""]["""kernel"""].T ) )
_UpperCAmelCase = ly_weight["""self_attention"""]
_UpperCAmelCase = nn.Parameter(torch.FloatTensor(attention_weights["""query"""]["""kernel"""].T ) )
_UpperCAmelCase = nn.Parameter(torch.FloatTensor(attention_weights["""key"""]["""kernel"""].T ) )
_UpperCAmelCase = nn.Parameter(torch.FloatTensor(attention_weights["""value"""]["""kernel"""].T ) )
_UpperCAmelCase = nn.Parameter(torch.FloatTensor(attention_weights["""out"""]["""kernel"""].T ) )
_UpperCAmelCase = ly_weight["""MultiHeadDotProductAttention_0"""]
_UpperCAmelCase = nn.Parameter(torch.FloatTensor(attention_weights["""query"""]["""kernel"""].T ) )
_UpperCAmelCase = nn.Parameter(torch.FloatTensor(attention_weights["""key"""]["""kernel"""].T ) )
_UpperCAmelCase = nn.Parameter(torch.FloatTensor(attention_weights["""value"""]["""kernel"""].T ) )
_UpperCAmelCase = nn.Parameter(torch.FloatTensor(attention_weights["""out"""]["""kernel"""].T ) )
_UpperCAmelCase = nn.Parameter(
torch.FloatTensor(ly_weight["""pre_cross_attention_layer_norm"""]["""scale"""] ) )
_UpperCAmelCase = nn.Parameter(torch.FloatTensor(ly_weight["""pre_mlp_layer_norm"""]["""scale"""] ) )
_UpperCAmelCase = nn.Parameter(
torch.FloatTensor(ly_weight["""FiLMLayer_1"""]["""DenseGeneral_0"""]["""kernel"""].T ) )
_UpperCAmelCase = nn.Parameter(torch.FloatTensor(ly_weight["""mlp"""]["""wi_0"""]["""kernel"""].T ) )
_UpperCAmelCase = nn.Parameter(torch.FloatTensor(ly_weight["""mlp"""]["""wi_1"""]["""kernel"""].T ) )
_UpperCAmelCase = nn.Parameter(torch.FloatTensor(ly_weight["""mlp"""]["""wo"""]["""kernel"""].T ) )
_UpperCAmelCase = nn.Parameter(torch.FloatTensor(weights["""decoder_norm"""]["""scale"""] ) )
_UpperCAmelCase = nn.Parameter(torch.FloatTensor(weights["""spec_out_dense"""]["""kernel"""].T ) )
return model
def main(args):
    t5_checkpoint = checkpoints.load_t5x_checkpoint(args.checkpoint_path)
    t5_checkpoint = jnp.tree_util.tree_map(onp.array, t5_checkpoint)

    gin_overrides = [
        "from __gin__ import dynamic_registration",
        "from music_spectrogram_diffusion.models.diffusion import diffusion_utils",
        "diffusion_utils.ClassifierFreeGuidanceConfig.eval_condition_weight = 2.0",
        "diffusion_utils.DiffusionConfig.classifier_free_guidance = @diffusion_utils.ClassifierFreeGuidanceConfig()",
    ]

    gin_file = os.path.join(args.checkpoint_path, "..", "config.gin")
    gin_config = inference.parse_training_gin_file(gin_file, gin_overrides)
    synth_model = inference.InferenceModel(args.checkpoint_path, gin_config)

    scheduler = DDPMScheduler(beta_schedule="squaredcos_cap_v2", variance_type="fixed_large")

    notes_encoder = SpectrogramNotesEncoder(
        max_length=synth_model.sequence_length["inputs"], vocab_size=synth_model.model.module.config.vocab_size, d_model=synth_model.model.module.config.emb_dim, dropout_rate=synth_model.model.module.config.dropout_rate, num_layers=synth_model.model.module.config.num_encoder_layers, num_heads=synth_model.model.module.config.num_heads, d_kv=synth_model.model.module.config.head_dim, d_ff=synth_model.model.module.config.mlp_dim, feed_forward_proj="gated-gelu",
    )

    continuous_encoder = SpectrogramContEncoder(
        input_dims=synth_model.audio_codec.n_dims, targets_context_length=synth_model.sequence_length["targets_context"], d_model=synth_model.model.module.config.emb_dim, dropout_rate=synth_model.model.module.config.dropout_rate, num_layers=synth_model.model.module.config.num_encoder_layers, num_heads=synth_model.model.module.config.num_heads, d_kv=synth_model.model.module.config.head_dim, d_ff=synth_model.model.module.config.mlp_dim, feed_forward_proj="gated-gelu",
    )

    decoder = T5FilmDecoder(
        input_dims=synth_model.audio_codec.n_dims, targets_length=synth_model.sequence_length["targets_context"], max_decoder_noise_time=synth_model.model.module.config.max_decoder_noise_time, d_model=synth_model.model.module.config.emb_dim, num_layers=synth_model.model.module.config.num_decoder_layers, num_heads=synth_model.model.module.config.num_heads, d_kv=synth_model.model.module.config.head_dim, d_ff=synth_model.model.module.config.mlp_dim, dropout_rate=synth_model.model.module.config.dropout_rate,
    )

    notes_encoder = load_notes_encoder(t5_checkpoint["target"]["token_encoder"], notes_encoder)
    continuous_encoder = load_continuous_encoder(t5_checkpoint["target"]["continuous_encoder"], continuous_encoder)
    decoder = load_decoder(t5_checkpoint["target"]["decoder"], decoder)

    melgan = OnnxRuntimeModel.from_pretrained("kashif/soundstream_mel_decoder")

    pipe = SpectrogramDiffusionPipeline(
        notes_encoder=notes_encoder, continuous_encoder=continuous_encoder, decoder=decoder, scheduler=scheduler, melgan=melgan,
    )
    if args.save:
        pipe.save_pretrained(args.output_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("""--output_path""", default=None, type=str, required=True, help="""Path to the converted model.""")
parser.add_argument(
"""--save""", default=True, type=bool, required=False, help="""Whether to save the converted model or not."""
)
parser.add_argument(
"""--checkpoint_path""",
default=F'''{MODEL}/checkpoint_500000''',
type=str,
required=False,
help="""Path to the original jax model checkpoint.""",
)
    args = parser.parse_args()
main(args)
| 353
|
"""simple docstring"""
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from argparse import ArgumentParser
from accelerate.commands.config import get_config_parser
from accelerate.commands.env import env_command_parser
from accelerate.commands.launch import launch_command_parser
from accelerate.commands.test import test_command_parser
from accelerate.commands.tpu import tpu_command_parser
def main():
    parser = ArgumentParser("Accelerate CLI tool", usage="accelerate <command> [<args>]", allow_abbrev=False)
    subparsers = parser.add_subparsers(help="accelerate command helpers")

    # Register commands
    get_config_parser(subparsers=subparsers)
    env_command_parser(subparsers=subparsers)
    launch_command_parser(subparsers=subparsers)
    tpu_command_parser(subparsers=subparsers)
    test_command_parser(subparsers=subparsers)

    # Let's go
    args = parser.parse_args()

    if not hasattr(args, "func"):
        parser.print_help()
        exit(1)

    # Run
    args.func(args)
if __name__ == "__main__":
main()
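    # Typical invocations routed through this entry point (standard accelerate
    # usage): `accelerate config`, `accelerate env`, `accelerate launch
    # train.py --arg value`, `accelerate test`. A missing or unknown subcommand
    # leaves `args` without a `func`, so help is printed and the process exits
    # with status 1.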
| 30
| 0
|
from typing import TYPE_CHECKING
from ..utils import _LazyModule
_import_structure = {
"config": [
"EXTERNAL_DATA_FORMAT_SIZE_LIMIT",
"OnnxConfig",
"OnnxConfigWithPast",
"OnnxSeq2SeqConfigWithPast",
"PatchingSpec",
],
"convert": ["export", "validate_model_outputs"],
"features": ["FeaturesManager"],
"utils": ["ParameterFormat", "compute_serialized_parameters_size"],
}
if TYPE_CHECKING:
from .config import (
EXTERNAL_DATA_FORMAT_SIZE_LIMIT,
OnnxConfig,
OnnxConfigWithPast,
        OnnxSeq2SeqConfigWithPast,
PatchingSpec,
)
from .convert import export, validate_model_outputs
from .features import FeaturesManager
from .utils import ParameterFormat, compute_serialized_parameters_size
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 336
|
import os
import re
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "google/bigbird-roberta-base": "https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model",
        "google/bigbird-roberta-large": (
            "https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model"
        ),
        "google/bigbird-base-trivia-itc": (
            "https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model"
        ),
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/bigbird-roberta-base": 4096,
    "google/bigbird-roberta-large": 4096,
    "google/bigbird-base-trivia-itc": 4096,
}
class BigBirdTokenizer(PreTrainedTokenizer):
    """Construct a BigBird tokenizer, based on SentencePiece."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    prefix_tokens: List[int] = []

    def __init__(self, vocab_file, unk_token="<unk>", bos_token="<s>", eos_token="</s>", pad_token="<pad>", sep_token="[SEP]", mask_token="[MASK]", cls_token="[CLS]", sp_model_kwargs: Optional[Dict[str, Any]] = None, **kwargs) -> None:
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token

        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, pad_token=pad_token, sep_token=sep_token, mask_token=mask_token, cls_token=cls_token, sp_model_kwargs=self.sp_model_kwargs, **kwargs)

        self.vocab_file = vocab_file

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)
    @property
    def vocab_size(self) -> int:
        return self.sp_model.get_piece_size()

    def get_vocab(self) -> Dict[str, int]:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        return self.sp_model.piece_to_id(token)

    def _convert_id_to_token(self, index):
        token = self.sp_model.IdToPiece(index)
        return token
    def convert_tokens_to_string(self, tokens):
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()
    def _decode(self, token_ids: List[int], skip_special_tokens: bool = False, clean_up_tokenization_spaces: bool = None, spaces_between_special_tokens: bool = True, **kwargs) -> str:
        self._decode_use_source_tokenizer = kwargs.pop("use_source_tokenizer", False)

        filtered_tokens = self.convert_ids_to_tokens(token_ids, skip_special_tokens=skip_special_tokens)

        # To avoid mixing byte-level and unicode for byte-level BPT
        # we need to build string separately for added tokens and byte-level tokens
        # cf. https://github.com/huggingface/transformers/issues/1133
        sub_texts = []
        current_sub_text = []
        for token in filtered_tokens:
            if skip_special_tokens and token in self.all_special_ids:
                continue
            if token in self.added_tokens_encoder:
                if current_sub_text:
                    sub_texts.append(self.convert_tokens_to_string(current_sub_text))
                    current_sub_text = []
                sub_texts.append(token)
            else:
                current_sub_text.append(token)
        if current_sub_text:
            sub_texts.append(self.convert_tokens_to_string(current_sub_text))

        # Mimic the behavior of the Rust tokenizer:
        # No space before [MASK] and [SEP]
        if spaces_between_special_tokens:
            text = re.sub(r" (\[(MASK|SEP)\])", r"\1", " ".join(sub_texts))
        else:
            text = "".join(sub_texts)

        clean_up_tokenization_spaces = (
            clean_up_tokenization_spaces
            if clean_up_tokenization_spaces is not None
            else self.clean_up_tokenization_spaces
        )
        if clean_up_tokenization_spaces:
            clean_text = self.clean_up_tokenization(text)
            return clean_text
        else:
            return text
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
| 8
| 0
|
import os
import time
import pytest
from datasets.utils.filelock import FileLock, Timeout
def test_filelock(tmpdir):
    lock1 = FileLock(str(tmpdir / "foo.lock"))
    lock2 = FileLock(str(tmpdir / "foo.lock"))
    timeout = 0.01
    with lock1.acquire():
        with pytest.raises(Timeout):
            _start = time.time()
            lock2.acquire(timeout)
        assert time.time() - _start > timeout


def test_long_path(tmpdir):
    filename = "a" * 1000 + ".lock"
    lock1 = FileLock(str(tmpdir / filename))
    assert lock1._lock_file.endswith(".lock")
    assert not lock1._lock_file.endswith(filename)
    assert len(os.path.basename(lock1._lock_file)) <= 255
    lock2 = FileLock(tmpdir / filename)
    with lock1.acquire():
        with pytest.raises(Timeout):
            lock2.acquire(0)
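# Note (an aside, not in the original): the long-path test relies on this
# FileLock variant shortening over-long lock-file names so the OS's 255-byte
# filename limit is respected while both locks still contend for the same
# underlying lock file.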
| 358
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"""configuration_instructblip""": [
"""INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""InstructBlipConfig""",
"""InstructBlipQFormerConfig""",
"""InstructBlipVisionConfig""",
],
"""processing_instructblip""": ["""InstructBlipProcessor"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_instructblip"] = [
"""INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""InstructBlipQFormerModel""",
"""InstructBlipPreTrainedModel""",
"""InstructBlipForConditionalGeneration""",
"""InstructBlipVisionModel""",
]
if TYPE_CHECKING:
from .configuration_instructblip import (
INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
InstructBlipConfig,
InstructBlipQFormerConfig,
InstructBlipVisionConfig,
)
from .processing_instructblip import InstructBlipProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_instructblip import (
INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
InstructBlipForConditionalGeneration,
InstructBlipPreTrainedModel,
InstructBlipQFormerModel,
InstructBlipVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 286
| 0
|
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
from PIL import Image
from ...utils import (
BaseOutput,
OptionalDependencyNotAvailable,
is_flax_available,
is_k_diffusion_available,
is_k_diffusion_version,
is_onnx_available,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
@dataclass
class StableDiffusionPipelineOutput(BaseOutput):
    images: Union[List[PIL.Image.Image], np.ndarray]
    nsfw_content_detected: Optional[List[bool]]
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
    from .pipeline_cycle_diffusion import CycleDiffusionPipeline
    from .pipeline_stable_diffusion import StableDiffusionPipeline
    from .pipeline_stable_diffusion_attend_and_excite import StableDiffusionAttendAndExcitePipeline
    from .pipeline_stable_diffusion_img2img import StableDiffusionImg2ImgPipeline
    from .pipeline_stable_diffusion_inpaint import StableDiffusionInpaintPipeline
    from .pipeline_stable_diffusion_inpaint_legacy import StableDiffusionInpaintPipelineLegacy
    from .pipeline_stable_diffusion_instruct_pix2pix import StableDiffusionInstructPix2PixPipeline
    from .pipeline_stable_diffusion_latent_upscale import StableDiffusionLatentUpscalePipeline
    from .pipeline_stable_diffusion_ldm3d import StableDiffusionLDM3DPipeline
    from .pipeline_stable_diffusion_model_editing import StableDiffusionModelEditingPipeline
    from .pipeline_stable_diffusion_panorama import StableDiffusionPanoramaPipeline
    from .pipeline_stable_diffusion_paradigms import StableDiffusionParadigmsPipeline
    from .pipeline_stable_diffusion_sag import StableDiffusionSAGPipeline
    from .pipeline_stable_diffusion_upscale import StableDiffusionUpscalePipeline
    from .pipeline_stable_unclip import StableUnCLIPPipeline
    from .pipeline_stable_unclip_img2img import StableUnCLIPImg2ImgPipeline
    from .safety_checker import StableDiffusionSafetyChecker
    from .stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version('>=', '4.25.0')):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import StableDiffusionImageVariationPipeline
else:
from .pipeline_stable_diffusion_image_variation import StableDiffusionImageVariationPipeline
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version('>=', '4.26.0')):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils.dummy_torch_and_transformers_objects import (
        StableDiffusionDepth2ImgPipeline,
        StableDiffusionDiffEditPipeline,
        StableDiffusionPix2PixZeroPipeline,
    )
else:
    from .pipeline_stable_diffusion_depth2img import StableDiffusionDepth2ImgPipeline
    from .pipeline_stable_diffusion_diffedit import StableDiffusionDiffEditPipeline
    from .pipeline_stable_diffusion_pix2pix_zero import StableDiffusionPix2PixZeroPipeline
try:
if not (
is_torch_available()
and is_transformers_available()
and is_k_diffusion_available()
and is_k_diffusion_version('>=', '0.0.12')
):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_and_k_diffusion_objects import * # noqa F403
else:
from .pipeline_stable_diffusion_k_diffusion import StableDiffusionKDiffusionPipeline
try:
if not (is_transformers_available() and is_onnx_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_onnx_objects import * # noqa F403
else:
    from .pipeline_onnx_stable_diffusion import OnnxStableDiffusionPipeline, StableDiffusionOnnxPipeline
    from .pipeline_onnx_stable_diffusion_img2img import OnnxStableDiffusionImg2ImgPipeline
    from .pipeline_onnx_stable_diffusion_inpaint import OnnxStableDiffusionInpaintPipeline
    from .pipeline_onnx_stable_diffusion_inpaint_legacy import OnnxStableDiffusionInpaintPipelineLegacy
    from .pipeline_onnx_stable_diffusion_upscale import OnnxStableDiffusionUpscalePipeline

if is_transformers_available() and is_flax_available():
    import flax

    @flax.struct.dataclass
    class FlaxStableDiffusionPipelineOutput(BaseOutput):
        images: np.ndarray
        nsfw_content_detected: List[bool]

    from ...schedulers.scheduling_pndm_flax import PNDMSchedulerState
    from .pipeline_flax_stable_diffusion import FlaxStableDiffusionPipeline
    from .pipeline_flax_stable_diffusion_img2img import FlaxStableDiffusionImg2ImgPipeline
    from .pipeline_flax_stable_diffusion_inpaint import FlaxStableDiffusionInpaintPipeline
    from .safety_checker_flax import FlaxStableDiffusionSafetyChecker
| 193
|
import math
from collections import defaultdict
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput
def betas_for_alpha_bar(num_diffusion_timesteps, max_beta=0.999, alpha_transform_type="cosine"):
    """Create a beta schedule that discretizes the given alpha_bar (cumulative alpha) function."""
    if alpha_transform_type == "cosine":

        def alpha_bar_fn(t):
            return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2

    elif alpha_transform_type == "exp":

        def alpha_bar_fn(t):
            return math.exp(t * -12.0)

    else:
        raise ValueError(f"Unsupported alpha_transform_type: {alpha_transform_type}")

    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta))
    return torch.tensor(betas, dtype=torch.float32)
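
# NOTE (editor): illustrative aside, not in the original file. Each discretized
# beta is beta_i = 1 - alpha_bar((i + 1) / T) / alpha_bar(i / T), clipped at
# max_beta; for the default cosine transform this yields small values that grow
# toward the end of the schedule, e.g.:
#
#   betas_for_alpha_bar(4)  # tensor of 4 betas, each <= 0.999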
class HeunDiscreteScheduler(SchedulerMixin, ConfigMixin):
    _compatibles = [e.name for e in KarrasDiffusionSchedulers]
    order = 2

    @register_to_config
    def __init__(self, num_train_timesteps: int = 1000, beta_start: float = 0.00085, beta_end: float = 0.012, beta_schedule: str = "linear", trained_betas: Optional[Union[np.ndarray, List[float]]] = None, prediction_type: str = "epsilon", use_karras_sigmas: Optional[bool] = False, clip_sample: Optional[bool] = False, clip_sample_range: float = 1.0, timestep_spacing: str = "linspace", steps_offset: int = 0):
        if trained_betas is not None:
            self.betas = torch.tensor(trained_betas, dtype=torch.float32)
        elif beta_schedule == "linear":
            self.betas = torch.linspace(beta_start, beta_end, num_train_timesteps, dtype=torch.float32)
        elif beta_schedule == "scaled_linear":
            # this schedule is very specific to the latent diffusion model.
            self.betas = (
                torch.linspace(beta_start**0.5, beta_end**0.5, num_train_timesteps, dtype=torch.float32) ** 2
            )
        elif beta_schedule == "squaredcos_cap_v2":
            # Glide cosine schedule
            self.betas = betas_for_alpha_bar(num_train_timesteps, alpha_transform_type="cosine")
        elif beta_schedule == "exp":
            self.betas = betas_for_alpha_bar(num_train_timesteps, alpha_transform_type="exp")
        else:
            raise NotImplementedError(f"{beta_schedule} is not implemented for {self.__class__}")

        self.alphas = 1.0 - self.betas
        self.alphas_cumprod = torch.cumprod(self.alphas, dim=0)

        # set all values
        self.set_timesteps(num_train_timesteps, None, num_train_timesteps)
        self.use_karras_sigmas = use_karras_sigmas
    def index_for_timestep(self, timestep, schedule_timesteps=None):
        if schedule_timesteps is None:
            schedule_timesteps = self.timesteps

        indices = (schedule_timesteps == timestep).nonzero()

        # The sigma index that is taken for the **very** first `step`
        # is always the second index (or the last index if there is only 1)
        # This way we can ensure we don't accidentally skip a sigma in
        # case we start in the middle of the denoising schedule (e.g. for image-to-image)
        if len(self._index_counter) == 0:
            pos = 1 if len(indices) > 1 else 0
        else:
            timestep_int = timestep.cpu().item() if torch.is_tensor(timestep) else timestep
            pos = self._index_counter[timestep_int]

        return indices[pos].item()

    @property
    def init_noise_sigma(self):
        # standard deviation of the initial noise distribution
        if self.config.timestep_spacing in ["linspace", "trailing"]:
            return self.sigmas.max()

        return (self.sigmas.max() ** 2 + 1) ** 0.5

    def scale_model_input(self, sample: torch.FloatTensor, timestep: Union[float, torch.FloatTensor]) -> torch.FloatTensor:
        step_index = self.index_for_timestep(timestep)

        sigma = self.sigmas[step_index]
        sample = sample / ((sigma**2 + 1) ** 0.5)
        return sample
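
    # NOTE (editor): explanatory comment, not in the original. Dividing by
    # sqrt(sigma^2 + 1) is the input preconditioning used by Karras-style
    # samplers: it keeps the model input at roughly unit variance no matter
    # how much noise (sigma) the sample currently carries.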
    def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device] = None, num_train_timesteps: Optional[int] = None):
        self.num_inference_steps = num_inference_steps

        num_train_timesteps = num_train_timesteps or self.config.num_train_timesteps

        # "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891
        if self.config.timestep_spacing == "linspace":
            timesteps = np.linspace(0, num_train_timesteps - 1, num_inference_steps, dtype=float)[::-1].copy()
        elif self.config.timestep_spacing == "leading":
            step_ratio = num_train_timesteps // self.num_inference_steps
            # creates integer timesteps by multiplying by ratio
            # casting to int to avoid issues when num_inference_step is power of 3
            timesteps = (np.arange(0, num_inference_steps) * step_ratio).round()[::-1].copy().astype(float)
            timesteps += self.config.steps_offset
        elif self.config.timestep_spacing == "trailing":
            step_ratio = num_train_timesteps / self.num_inference_steps
            # creates integer timesteps by multiplying by ratio
            # casting to int to avoid issues when num_inference_step is power of 3
            timesteps = (np.arange(num_train_timesteps, 0, -step_ratio)).round().copy().astype(float)
            timesteps -= 1
        else:
            raise ValueError(
                f"{self.config.timestep_spacing} is not supported. Please make sure to choose one of 'linspace', 'leading' or 'trailing'."
            )

        sigmas = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5)
        log_sigmas = np.log(sigmas)
        sigmas = np.interp(timesteps, np.arange(0, len(sigmas)), sigmas)

        if self.config.use_karras_sigmas:
            sigmas = self._convert_to_karras(in_sigmas=sigmas, num_inference_steps=self.num_inference_steps)
            timesteps = np.array([self._sigma_to_t(sigma, log_sigmas) for sigma in sigmas])

        sigmas = np.concatenate([sigmas, [0.0]]).astype(np.float32)
        sigmas = torch.from_numpy(sigmas).to(device=device)
        self.sigmas = torch.cat([sigmas[:1], sigmas[1:-1].repeat_interleave(2), sigmas[-1:]])

        timesteps = torch.from_numpy(timesteps)
        timesteps = torch.cat([timesteps[:1], timesteps[1:].repeat_interleave(2)])

        if str(device).startswith("mps"):
            # mps does not support float64
            self.timesteps = timesteps.to(device, dtype=torch.float32)
        else:
            self.timesteps = timesteps.to(device=device)

        # empty dt and derivative
        self.prev_derivative = None
        self.dt = None

        # for exp beta schedules, such as the one for `pipeline_shap_e.py`
        # we need an index counter
        self._index_counter = defaultdict(int)
    def _sigma_to_t(self, sigma, log_sigmas):
        # get log sigma
        log_sigma = np.log(sigma)

        # get distribution
        dists = log_sigma - log_sigmas[:, np.newaxis]

        # get sigmas range
        low_idx = np.cumsum((dists >= 0), axis=0).argmax(axis=0).clip(max=log_sigmas.shape[0] - 2)
        high_idx = low_idx + 1

        low = log_sigmas[low_idx]
        high = log_sigmas[high_idx]

        # interpolate sigmas
        w = (low - log_sigma) / (low - high)
        w = np.clip(w, 0, 1)

        # transform interpolation to time range
        t = (1 - w) * low_idx + w * high_idx
        t = t.reshape(sigma.shape)
        return t

    def _convert_to_karras(self, in_sigmas: torch.FloatTensor, num_inference_steps) -> torch.FloatTensor:
        """Constructs the noise schedule of Karras et al. (2022)."""
        sigma_min: float = in_sigmas[-1].item()
        sigma_max: float = in_sigmas[0].item()

        rho = 7.0  # 7.0 is the value used in the paper
        ramp = np.linspace(0, 1, num_inference_steps)
        min_inv_rho = sigma_min ** (1 / rho)
        max_inv_rho = sigma_max ** (1 / rho)
        sigmas = (max_inv_rho + ramp * (min_inv_rho - max_inv_rho)) ** rho
        return sigmas
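
    # NOTE (editor): explanatory comment, not in the original. This implements
    # eq. (5) of Karras et al. (2022), "Elucidating the Design Space of
    # Diffusion-Based Generative Models": interpolate linearly in sigma^(1/rho)
    # between sigma_max and sigma_min, then raise back to the rho-th power.
    # rho = 7 concentrates steps near small sigmas, where sampling error
    # matters most.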
    @property
    def state_in_first_order(self):
        return self.dt is None
    def step(self, model_output: Union[torch.FloatTensor, np.ndarray], timestep: Union[float, torch.FloatTensor], sample: Union[torch.FloatTensor, np.ndarray], return_dict: bool = True) -> Union[SchedulerOutput, Tuple]:
        step_index = self.index_for_timestep(timestep)

        # advance index counter by 1
        timestep_int = timestep.cpu().item() if torch.is_tensor(timestep) else timestep
        self._index_counter[timestep_int] += 1

        if self.state_in_first_order:
            sigma = self.sigmas[step_index]
            sigma_next = self.sigmas[step_index + 1]
        else:
            # 2nd order / Heun's method
            sigma = self.sigmas[step_index - 1]
            sigma_next = self.sigmas[step_index]

        # currently only gamma=0 is supported. This usually works best anyways.
        # We can support gamma in the future but then need to scale the timestep before
        # passing it to the model which requires a change in API
        gamma = 0
        sigma_hat = sigma * (gamma + 1)  # Note: sigma_hat == sigma for now

        # 1. compute predicted original sample (x_0) from sigma-scaled predicted noise
        if self.config.prediction_type == "epsilon":
            sigma_input = sigma_hat if self.state_in_first_order else sigma_next
            pred_original_sample = sample - sigma_input * model_output
        elif self.config.prediction_type == "v_prediction":
            sigma_input = sigma_hat if self.state_in_first_order else sigma_next
            pred_original_sample = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + (
                sample / (sigma_input**2 + 1)
            )
        elif self.config.prediction_type == "sample":
            pred_original_sample = model_output
        else:
            raise ValueError(
                f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`"
            )

        if self.config.clip_sample:
            pred_original_sample = pred_original_sample.clamp(
                -self.config.clip_sample_range, self.config.clip_sample_range
            )

        if self.state_in_first_order:
            # 2. Convert to an ODE derivative for 1st order
            derivative = (sample - pred_original_sample) / sigma_hat
            # 3. delta timestep
            dt = sigma_next - sigma_hat

            # store for 2nd order step
            self.prev_derivative = derivative
            self.dt = dt
            self.sample = sample
        else:
            # 2. 2nd order / Heun's method
            derivative = (sample - pred_original_sample) / sigma_next
            derivative = (self.prev_derivative + derivative) / 2

            # 3. take prev timestep & sample
            dt = self.dt
            sample = self.sample

            # free dt and derivative
            # Note, this puts the scheduler in "first order mode"
            self.prev_derivative = None
            self.dt = None
            self.sample = None

        prev_sample = sample + derivative * dt

        if not return_dict:
            return (prev_sample,)

        return SchedulerOutput(prev_sample=prev_sample)
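
    # NOTE (editor): explanatory comment, not in the original. `step` realizes
    # Heun's method as a predictor-corrector: the first call performs an Euler
    # step and caches (derivative, dt, sample); the second call re-evaluates
    # the slope at the predicted point, averages the two slopes (trapezoidal
    # rule), and applies the cached dt from the cached sample. Each inference
    # step therefore costs two model evaluations, which is why sigmas and
    # timesteps are repeat_interleave'd in set_timesteps.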
    def add_noise(self, original_samples: torch.FloatTensor, noise: torch.FloatTensor, timesteps: torch.FloatTensor) -> torch.FloatTensor:
        # Make sure sigmas and timesteps have the same device and dtype as original_samples
        sigmas = self.sigmas.to(device=original_samples.device, dtype=original_samples.dtype)
        if original_samples.device.type == "mps" and torch.is_floating_point(timesteps):
            # mps does not support float64
            schedule_timesteps = self.timesteps.to(original_samples.device, dtype=torch.float32)
            timesteps = timesteps.to(original_samples.device, dtype=torch.float32)
        else:
            schedule_timesteps = self.timesteps.to(original_samples.device)
            timesteps = timesteps.to(original_samples.device)

        step_indices = [self.index_for_timestep(t, schedule_timesteps) for t in timesteps]

        sigma = sigmas[step_indices].flatten()
        while len(sigma.shape) < len(original_samples.shape):
            sigma = sigma.unsqueeze(-1)

        noisy_samples = original_samples + noise * sigma
        return noisy_samples
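
    # NOTE (editor): explanatory comment, not in the original. In this sigma
    # parameterization the forward process is simply x_t = x_0 + sigma_t * eps,
    # so add_noise just looks up sigma for each requested timestep and adds
    # scaled Gaussian noise (used e.g. to start image-to-image part-way
    # through the schedule).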
    def __len__(self):
        return self.config.num_train_timesteps
| 193
| 1
|
import argparse
import pytorch_lightning as pl
import torch
from torch import nn
from transformers import LongformerForQuestionAnswering, LongformerModel
class LightningModel(pl.LightningModule):
    def __init__(self, model):
        super().__init__()
        self.model = model
        self.num_labels = 2
        self.qa_outputs = nn.Linear(self.model.config.hidden_size, self.num_labels)

    # implement only because Lightning requires it
    def forward(self):
        pass


def convert_longformer_qa_checkpoint_to_pytorch(longformer_model: str, longformer_question_answering_ckpt_path: str, pytorch_dump_folder_path: str):
    # load longformer model from model identifier
    longformer = LongformerModel.from_pretrained(longformer_model)
    lightning_model = LightningModel(longformer)

    ckpt = torch.load(longformer_question_answering_ckpt_path, map_location=torch.device("cpu"))
    lightning_model.load_state_dict(ckpt["state_dict"])

    # init longformer question answering model
    longformer_for_qa = LongformerForQuestionAnswering.from_pretrained(longformer_model)

    # transfer weights
    longformer_for_qa.longformer.load_state_dict(lightning_model.model.state_dict())
    longformer_for_qa.qa_outputs.load_state_dict(lightning_model.qa_outputs.state_dict())
    longformer_for_qa.eval()

    # save model
    longformer_for_qa.save_pretrained(pytorch_dump_folder_path)

    print(f"Conversion successful. Model saved under {pytorch_dump_folder_path}")
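
# NOTE (editor): illustrative invocation, not in the original; script name and
# paths are placeholders, flags are those defined by the parser below:
#   python convert_longformer_qa_checkpoint.py \
#       --longformer_model longformer-base-4096 \
#       --longformer_question_answering_ckpt_path path/to/lightning.ckpt \
#       --pytorch_dump_folder_path path/to/output_dir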
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--longformer_model''',
default=None,
type=str,
required=True,
help='''model identifier of longformer. Should be either `longformer-base-4096` or `longformer-large-4096`.''',
)
parser.add_argument(
'''--longformer_question_answering_ckpt_path''',
default=None,
type=str,
required=True,
help='''Path the official PyTorch Lightning Checkpoint.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
    args = parser.parse_args()
convert_longformer_qa_checkpoint_to_pytorch(
args.longformer_model, args.longformer_question_answering_ckpt_path, args.pytorch_dump_folder_path
)
| 355
|
import requests
from bs4 import BeautifulSoup


def world_covid19_stats(url: str = "https://www.worldometers.info/coronavirus") -> dict:
    """Return a dict of COVID-19 statistics scraped from worldometers."""
    soup = BeautifulSoup(requests.get(url).text, "html.parser")
    keys = soup.findAll("h1")
    values = soup.findAll("div", {"class": "maincounter-number"})
    keys += soup.findAll("span", {"class": "panel-title"})
    values += soup.findAll("div", {"class": "number-table-main"})
    return {key.text.strip(): value.text.strip() for key, value in zip(keys, values)}
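
# NOTE (editor): explanatory comment, not in the original. The headline
# counters (h1 + div.maincounter-number) and the table panels
# (span.panel-title + div.number-table-main) appear in matching order on the
# page, so zipping keys with values pairs each title with its count. This
# relies on worldometers' page structure at the time of writing.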
if __name__ == "__main__":
    print("\033[1m" + "COVID-19 Status of the World" + "\033[0m\n")
    for key, value in world_covid19_stats().items():
        print(f"{key}\n{value}\n")
| 63
| 0
|
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy
import tensorflow as tf
from transformers import (
TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
BertConfig,
DPRConfig,
TFDPRContextEncoder,
TFDPRQuestionEncoder,
TFDPRReader,
)
class TFDPRModelTester:
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None, projection_dim=0):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.projection_dim = projection_dim

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            # follow test_modeling_tf_ctrl.py
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = BertConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range,
        )
        config = DPRConfig(projection_dim=self.projection_dim, **config.to_dict())

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def create_and_check_dpr_context_encoder(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFDPRContextEncoder(config=config)
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.projection_dim or self.hidden_size))

    def create_and_check_dpr_question_encoder(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFDPRQuestionEncoder(config=config)
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.projection_dim or self.hidden_size))

    def create_and_check_dpr_reader(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFDPRReader(config=config)
        result = model(input_ids, attention_mask=input_mask)

        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.relevance_logits.shape, (self.batch_size,))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids}
        return config, inputs_dict
@require_tf
class TFDPRModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFDPRContextEncoder,
            TFDPRQuestionEncoder,
            TFDPRReader,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = {"feature-extraction": TFDPRQuestionEncoder} if is_tf_available() else {}

    test_resize_embeddings = False
    test_missing_keys = False
    test_pruning = False
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFDPRModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DPRConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_dpr_context_encoder_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_dpr_context_encoder(*config_and_inputs)

    def test_dpr_question_encoder_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_dpr_question_encoder(*config_and_inputs)

    def test_dpr_reader_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_dpr_reader(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFDPRContextEncoder.from_pretrained(model_name)
            self.assertIsNotNone(model)

        for model_name in TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFDPRContextEncoder.from_pretrained(model_name)
            self.assertIsNotNone(model)

        for model_name in TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFDPRQuestionEncoder.from_pretrained(model_name)
            self.assertIsNotNone(model)

        for model_name in TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFDPRReader.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_tf
class TFDPRModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head(self):
        model = TFDPRQuestionEncoder.from_pretrained("facebook/dpr-question_encoder-single-nq-base")

        input_ids = tf.constant(
            [[101, 7592, 1010, 2003, 2026, 3899, 10140, 1029, 102]]
        )  # [CLS] hello, is my dog cute? [SEP]
        output = model(input_ids)[0]  # embedding shape = (1, 768)
        # compare the actual values for a slice.
        expected_slice = tf.constant(
            [
                [
                    0.03236253,
                    0.12753335,
                    0.16818509,
                    0.00279786,
                    0.3896933,
                    0.24264945,
                    0.2178971,
                    -0.02335227,
                    -0.08481959,
                    -0.14324117,
                ]
            ]
        )
        self.assertTrue(numpy.allclose(output[:, :10].numpy(), expected_slice.numpy(), atol=1e-4))
| 41
|
'''simple docstring'''
import argparse
from pathlib import Path
import torch
from packaging import version
from torch.onnx import export
from diffusers import AutoencoderKL
is_torch_less_than_1_11 = version.parse(version.parse(torch.__version__).base_version) < version.parse("1.11")


def onnx_export(model, model_args: tuple, output_path: Path, ordered_input_names, output_names, dynamic_axes, opset, use_external_data_format=False):
    output_path.parent.mkdir(parents=True, exist_ok=True)
    # PyTorch deprecated the `enable_onnx_checker` and `use_external_data_format` arguments in v1.11,
    # so we check the torch version for backwards compatibility
    if is_torch_less_than_1_11:
        export(model, model_args, f=output_path.as_posix(), input_names=ordered_input_names, output_names=output_names, dynamic_axes=dynamic_axes, do_constant_folding=True, use_external_data_format=use_external_data_format, enable_onnx_checker=True, opset_version=opset)
    else:
        export(model, model_args, f=output_path.as_posix(), input_names=ordered_input_names, output_names=output_names, dynamic_axes=dynamic_axes, do_constant_folding=True, opset_version=opset)
@torch.no_grad()
def convert_models(model_path: str, output_path: str, opset: int, fp16: bool = False):
    dtype = torch.float16 if fp16 else torch.float32
    if fp16 and torch.cuda.is_available():
        device = "cuda"
    elif fp16 and not torch.cuda.is_available():
        raise ValueError("`float16` model export is only supported on GPUs with CUDA")
    else:
        device = "cpu"
    output_path = Path(output_path)

    # VAE DECODER
    vae_decoder = AutoencoderKL.from_pretrained(model_path + "/vae")
    vae_latent_channels = vae_decoder.config.latent_channels
    # forward only through the decoder part
    vae_decoder.forward = vae_decoder.decode
    onnx_export(
        vae_decoder,
        model_args=(
            torch.randn(1, vae_latent_channels, 25, 25).to(device=device, dtype=dtype),
            False,
        ),
        output_path=output_path / "vae_decoder" / "model.onnx",
        ordered_input_names=["latent_sample", "return_dict"],
        output_names=["sample"],
        dynamic_axes={
            "latent_sample": {0: "batch", 1: "channels", 2: "height", 3: "width"},
        },
        opset=opset,
    )
    del vae_decoder
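
# NOTE (editor): illustrative invocation, not in the original; script name and
# paths are placeholders, flags are those defined by the parser below:
#   python convert_vae_decoder_to_onnx.py \
#       --model_path path/to/diffusers_checkpoint \
#       --output_path ./sd_onnx --opset 14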
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'''--model_path''',
type=str,
required=True,
help='''Path to the `diffusers` checkpoint to convert (either a local directory or on the Hub).''',
)
parser.add_argument('''--output_path''', type=str, required=True, help='''Path to the output model.''')
parser.add_argument(
'''--opset''',
default=14,
type=int,
help='''The version of the ONNX operator set to use.''',
)
parser.add_argument('''--fp16''', action='''store_true''', default=False, help='''Export the models in `float16` mode''')
    args = parser.parse_args()
    print(args.output_path)
    convert_models(args.model_path, args.output_path, args.opset, args.fp16)
print('''SD: Done: ONNX''')
| 104
| 0
|
import math
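

# NOTE (editor): background comment, not in the original. A Proth number has
# the form k * 2^m + 1 with k odd and k < 2^m; the sequence starts
# 3, 5, 9, 13, 17, 25, 33, ... and proth(n) below returns its n-th element.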
def proth(number: int) -> int:
    """Return the number-th Proth number (1-indexed)."""
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)

    if number < 1:
        msg = f"Input value of [number={number}] must be > 0"
        raise ValueError(msg)
    elif number == 1:
        return 3
    elif number == 2:
        return 5
    else:
        block_index = int(math.log(number // 3, 2)) + 2

        proth_list = [3, 5]
        proth_index = 2
        increment = 3
        for block in range(1, block_index):
            for _ in range(increment):
                proth_list.append(2 ** (block + 1) + proth_list[proth_index - 1])
                proth_index += 1
            increment *= 2

    return proth_list[number - 1]


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    for number in range(11):
        value = 0
        try:
            value = proth(number)
        except ValueError:
            print(f"ValueError: there is no {number}th Proth number")
            continue

        print(f"The {number}th Proth number: {value}")
| 359
|
from __future__ import annotations
import unittest
from transformers import AutoTokenizer, PegasusConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
    from transformers import TFAutoModelForSeq2SeqLM, TFPegasusForConditionalGeneration, TFPegasusModel
@require_tf
class TFPegasusModelTester:
    config_cls = PegasusConfig
    config_updates = {}
    hidden_act = "gelu"

    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_labels=False, vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=40, eos_token_id=2, pad_token_id=1, bos_token_id=0):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id

    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)

        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        config = self.config_cls(
            vocab_size=self.vocab_size, d_model=self.hidden_size, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, eos_token_ids=[2], bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id, decoder_start_token_id=self.pad_token_id, **self.config_updates,
        )
        inputs_dict = prepare_pegasus_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict
    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = TFPegasusModel(config=config).get_decoder()
        input_ids = inputs_dict["input_ids"]

        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        head_mask = inputs_dict["head_mask"]
        self.batch_size = 1

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, head_mask=head_mask, use_cache=True)

        output, past_key_values = outputs.to_tuple()

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3), 2), tf.int8)

        # append to next input_ids and
        next_input_ids = tf.concat([input_ids, next_tokens], axis=-1)
        next_attention_mask = tf.concat([attention_mask, next_attn_mask], axis=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)[0]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[0]

        self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1])

        # select random slice
        random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1]))
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]

        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_no_past_slice, output_from_past_slice, rtol=1e-3)


def prepare_pegasus_inputs_dict(config, input_ids, decoder_input_ids, attention_mask=None, decoder_attention_mask=None, head_mask=None, decoder_head_mask=None, cross_attn_head_mask=None):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ],
            axis=-1,
        )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
        "cross_attn_head_mask": cross_attn_head_mask,
    }
@require_tf
class TFPegasusModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFPegasusForConditionalGeneration, TFPegasusModel) if is_tf_available() else ()
    all_generative_model_classes = (TFPegasusForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": TFPegasusForConditionalGeneration,
            "feature-extraction": TFPegasusModel,
            "summarization": TFPegasusForConditionalGeneration,
            "text2text-generation": TFPegasusForConditionalGeneration,
            "translation": TFPegasusForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    is_encoder_decoder = True
    test_pruning = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFPegasusModelTester(self)
        self.config_tester = ConfigTester(self, config_class=PegasusConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)
@require_sentencepiece
@require_tokenizers
@require_tf
class TFPegasusIntegrationTests(unittest.TestCase):
    src_text = [
""" PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.""",
""" The London trio are up for best UK act and best album, as well as getting two nominations in the best song category.\"We got told like this morning 'Oh I think you're nominated'\", said Dappy.\"And I was like 'Oh yeah, which one?' And now we've got nominated for four awards. I mean, wow!\"Bandmate Fazer added: \"We thought it's best of us to come down and mingle with everyone and say hello to the cameras. And now we find we've got four nominations.\"The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn't be too disappointed if they didn't win this time around.\"At the end of the day we're grateful to be where we are in our careers.\"If it don't happen then it don't happen - live to fight another day and keep on making albums and hits for the fans.\"Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers' All These Things That I've Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year's Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border.\"We just done Edinburgh the other day,\" said Dappy.\"We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!\" """,
    ]
    expected_text = [
        "California's largest electricity provider has cut power to hundreds of thousands of customers in an effort to"
        " reduce the risk of wildfires.",
        'N-Dubz have revealed they\'re "grateful" to have been nominated for four Mobo Awards.',
    ]  # differs slightly from pytorch, likely due to numerical differences in linear layers

    model_name = "google/pegasus-xsum"
    @cached_property
    def tokenizer(self):
        return AutoTokenizer.from_pretrained(self.model_name)

    @cached_property
    def model(self):
        model = TFAutoModelForSeq2SeqLM.from_pretrained(self.model_name)
        return model

    def _assert_generated_batch_equal_expected(self, **tokenizer_kwargs):
        generated_words = self.translate_src_text(**tokenizer_kwargs)
        assert self.expected_text == generated_words

    def translate_src_text(self, **tokenizer_kwargs):
        model_inputs = self.tokenizer(self.src_text, **tokenizer_kwargs, padding=True, return_tensors="tf")
        generated_ids = self.model.generate(
            model_inputs.input_ids, attention_mask=model_inputs.attention_mask, num_beams=2, use_cache=True,
        )
        generated_words = self.tokenizer.batch_decode(generated_ids.numpy(), skip_special_tokens=True)
        return generated_words

    @slow
    def test_batch_generation(self):
        self._assert_generated_batch_equal_expected()
| 223
| 0
|
'''simple docstring'''
def check_cycle(graph: dict) -> bool:
    """Return True if the directed graph (adjacency dict) contains a cycle."""
    # keep track of all the visited nodes
    visited = set()
    # To detect a back edge, keep track of vertices currently in the recursion stack
    rec_stk = set()
    return any(
        node not in visited and depth_first_search(graph, node, visited, rec_stk)
        for node in graph
    )


def depth_first_search(graph: dict, vertex: int, visited: set, rec_stk: set) -> bool:
    """Recursive DFS that reports a cycle when it reaches a vertex already on the stack."""
    # Mark the current node as visited and add it to the recursion stack
    visited.add(vertex)
    rec_stk.add(vertex)

    for node in graph[vertex]:
        if node not in visited:
            if depth_first_search(graph, node, visited, rec_stk):
                return True
        elif node in rec_stk:
            return True

    # The node needs to be removed from recursion stack before function ends
    rec_stk.remove(vertex)
    return False
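
# NOTE (editor): illustrative examples, not in the original:
#   check_cycle({0: [1], 1: [2], 2: [0]})  # True  (back edge 2 -> 0)
#   check_cycle({0: [1], 1: [2], 2: []})   # False (a DAG has no back edges)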
if __name__ == "__main__":
from doctest import testmod
testmod()
| 47
|
from __future__ import annotations
from collections.abc import MutableSequence
class Polynomial:
    def __init__(self, degree: int, coefficients: MutableSequence[float]) -> None:
        """coefficients[i] is the coefficient of x**i."""
        if len(coefficients) != degree + 1:
            raise ValueError(
                "The number of coefficients should be equal to the degree + 1."
            )

        self.coefficients: list[float] = list(coefficients)
        self.degree = degree

    def __add__(self, polynomial_2: Polynomial) -> Polynomial:
        if self.degree > polynomial_2.degree:
            coefficients = self.coefficients[:]
            for i in range(polynomial_2.degree + 1):
                coefficients[i] += polynomial_2.coefficients[i]
            return Polynomial(self.degree, coefficients)
        else:
            coefficients = polynomial_2.coefficients[:]
            for i in range(self.degree + 1):
                coefficients[i] += self.coefficients[i]
            return Polynomial(polynomial_2.degree, coefficients)

    def __sub__(self, polynomial_2: Polynomial) -> Polynomial:
        return self + polynomial_2 * Polynomial(0, [-1])

    def __neg__(self) -> Polynomial:
        return Polynomial(self.degree, [-c for c in self.coefficients])

    def __mul__(self, polynomial_2: Polynomial) -> Polynomial:
        coefficients = [0] * (self.degree + polynomial_2.degree + 1)
        for i in range(self.degree + 1):
            for j in range(polynomial_2.degree + 1):
                coefficients[i + j] += (
                    self.coefficients[i] * polynomial_2.coefficients[j]
                )
        return Polynomial(self.degree + polynomial_2.degree, coefficients)

    def evaluate(self, substitution: int | float) -> int | float:
        result: int | float = 0
        for i in range(self.degree + 1):
            result += self.coefficients[i] * (substitution**i)
        return result

    def __str__(self) -> str:
        polynomial = ""
        for i in range(self.degree, -1, -1):
            if self.coefficients[i] == 0:
                continue
            elif self.coefficients[i] > 0:
                if polynomial:
                    polynomial += " + "
            else:
                polynomial += " - "

            if i == 0:
                polynomial += str(abs(self.coefficients[i]))
            elif i == 1:
                polynomial += str(abs(self.coefficients[i])) + "x"
            else:
                polynomial += str(abs(self.coefficients[i])) + "x^" + str(i)

        return polynomial

    def __repr__(self) -> str:
        return self.__str__()

    def derivative(self) -> Polynomial:
        coefficients = [0] * self.degree
        for i in range(self.degree):
            coefficients[i] = self.coefficients[i + 1] * (i + 1)
        return Polynomial(self.degree - 1, coefficients)

    def integral(self, constant: int | float = 0) -> Polynomial:
        coefficients = [0] * (self.degree + 2)
        coefficients[0] = constant
        for i in range(self.degree + 1):
            coefficients[i + 1] = self.coefficients[i] / (i + 1)
        return Polynomial(self.degree + 1, coefficients)

    def __eq__(self, polynomial_2: object) -> bool:
        if not isinstance(polynomial_2, Polynomial):
            return False

        if self.degree != polynomial_2.degree:
            return False

        for i in range(self.degree + 1):
            if self.coefficients[i] != polynomial_2.coefficients[i]:
                return False

        return True

    def __ne__(self, polynomial_2: object) -> bool:
        return not self.__eq__(polynomial_2)
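
# NOTE (editor): illustrative usage, not in the original:
#   p = Polynomial(2, [1, 2, 3])  # represents 3x^2 + 2x + 1
#   str(p)                        # '3x^2 + 2x + 1'
#   p.evaluate(1)                 # 6
#   str(p.derivative())           # '6x + 2'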
| 30
| 0
|
"""simple docstring"""
import requests
from bs4 import BeautifulSoup


def world_covid19_stats(url: str = "https://www.worldometers.info/coronavirus") -> dict:
    """Return a dict of COVID-19 statistics scraped from worldometers."""
    soup = BeautifulSoup(requests.get(url).text, "html.parser")
    keys = soup.findAll("h1")
    values = soup.findAll("div", {"class": "maincounter-number"})
    keys += soup.findAll("span", {"class": "panel-title"})
    values += soup.findAll("div", {"class": "number-table-main"})
    return {key.text.strip(): value.text.strip() for key, value in zip(keys, values)}
if __name__ == "__main__":
print("\033[1m" + "COVID-19 Status of the World" + "\033[0m\n")
    for key, value in world_covid19_stats().items():
print(f"""{key}\n{value}\n""")
| 351
|
"""simple docstring"""
import pytest
UpperCAmelCase ="__dummy_dataset1__"
UpperCAmelCase ="\nimport json\nimport os\n\nimport datasets\n\n\nREPO_URL = \"https://huggingface.co/datasets/albertvillanova/tests-raw-jsonl/resolve/main/\"\nURLS = {\"train\": REPO_URL + \"wikiann-bn-train.jsonl\", \"validation\": REPO_URL + \"wikiann-bn-validation.jsonl\"}\n\n\nclass __DummyDataset1__(datasets.GeneratorBasedBuilder):\n\n def _info(self):\n features = datasets.Features(\n {\n \"tokens\": datasets.Sequence(datasets.Value(\"string\")),\n \"ner_tags\": datasets.Sequence(\n datasets.features.ClassLabel(\n names=[\n \"O\",\n \"B-PER\",\n \"I-PER\",\n \"B-ORG\",\n \"I-ORG\",\n \"B-LOC\",\n \"I-LOC\",\n ]\n )\n ),\n \"langs\": datasets.Sequence(datasets.Value(\"string\")),\n \"spans\": datasets.Sequence(datasets.Value(\"string\")),\n }\n )\n return datasets.DatasetInfo(features=features)\n\n def _split_generators(self, dl_manager):\n dl_path = dl_manager.download(URLS)\n return [\n datasets.SplitGenerator(datasets.Split.TRAIN, gen_kwargs={\"filepath\": dl_path[\"train\"]}),\n datasets.SplitGenerator(datasets.Split.VALIDATION, gen_kwargs={\"filepath\": dl_path[\"validation\"]}),\n ]\n\n def _generate_examples(self, filepath):\n with open(filepath, \"r\", encoding=\"utf-8\") as f:\n for i, line in enumerate(f):\n yield i, json.loads(line)\n"
@pytest.fixture
def dataset_loading_script_name():
    return DATASET_LOADING_SCRIPT_NAME


@pytest.fixture
def dataset_loading_script_code():
    return DATASET_LOADING_SCRIPT_CODE


@pytest.fixture
def dataset_loading_script_dir(dataset_loading_script_name, dataset_loading_script_code, tmp_path):
    script_name = dataset_loading_script_name
    script_dir = tmp_path / "datasets" / script_name
    script_dir.mkdir(parents=True)
    script_path = script_dir / f"{script_name}.py"
    with open(script_path, "w") as f:
        f.write(dataset_loading_script_code)
    return str(script_dir)
| 77
| 0
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_instructblip': [
'INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP',
'InstructBlipConfig',
'InstructBlipQFormerConfig',
'InstructBlipVisionConfig',
],
'processing_instructblip': ['InstructBlipProcessor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_instructblip"] = [
'INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST',
'InstructBlipQFormerModel',
'InstructBlipPreTrainedModel',
'InstructBlipForConditionalGeneration',
'InstructBlipVisionModel',
]
if TYPE_CHECKING:
from .configuration_instructblip import (
INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
InstructBlipConfig,
InstructBlipQFormerConfig,
InstructBlipVisionConfig,
)
from .processing_instructblip import InstructBlipProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_instructblip import (
INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
InstructBlipForConditionalGeneration,
InstructBlipPreTrainedModel,
InstructBlipQFormerModel,
InstructBlipVisionModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 335
|
"""simple docstring"""
import torch
from diffusers import DiffusionPipeline
# NOTE (editor): the class name below was obfuscated in the source and is a
# descriptive stand-in, not necessarily the original name.
class OneStepPipeline(DiffusionPipeline):
    def __init__(self, unet, scheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    def __call__(self):
        noise = torch.randn(
            (1, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size),
        )
        timestep = 1

        model_output = self.unet(noise, timestep).sample
        scheduler_output = self.scheduler.step(model_output, timestep, noise).prev_sample

        # deterministically return an all-ones tensor of the right shape
        result = scheduler_output - scheduler_output + torch.ones_like(scheduler_output)

        return result
| 286
| 0
|
"""simple docstring"""
# A Bipartite Graph is a graph whose vertices can be divided into two independent sets,
# U and V such that every edge (u, v) either connects a vertex from U to V or a vertex
# from V to U. In other words, for every edge (u, v), either u belongs to U and v to V,
# or u belongs to V and v to U. We can also say that there is no edge that connects
# vertices of same set.
def check_bipartite_dfs(graph):
    visited = [False] * len(graph)
    color = [-1] * len(graph)

    def dfs(v, c):
        visited[v] = True
        color[v] = c
        for u in graph[v]:
            if not visited[u]:
                dfs(u, 1 - c)

    for i in range(len(graph)):
        if not visited[i]:
            dfs(i, 0)

    for i in range(len(graph)):
        for j in graph[i]:
            if color[i] == color[j]:
                return False

    return True
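
# NOTE (editor): explanatory comment, not in the original. The DFS 2-colors
# each connected component; the graph is bipartite iff no edge ends up with
# both endpoints the same color. The example graph below splits into
# {0, 2, 4} and {1, 3}, so the check prints True.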
# Adjacency list of graph
graph = {0: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2], 4: []}
print(check_bipartite_dfs(graph))
| 12
|
"""simple docstring"""
# XXX: we want transformers master here - in the absense of conftest manipulating sys.path:
# hack it in for now:
import sys
from pathlib import Path
git_repo_path = Path(__file__).resolve().parents[3] / "src"
sys.path.insert(1, str(git_repo_path))
import dataclasses # noqa
import io # noqa
import itertools # noqa
import json # noqa
import os # noqa
import unittest # noqa
from copy import deepcopy # noqa
from parameterized import parameterized # noqa
from transformers import TrainingArguments, is_torch_available # noqa
from transformers.deepspeed import is_deepspeed_available # noqa
from transformers.file_utils import WEIGHTS_NAME # noqa
from transformers.testing_utils import ( # noqa
CaptureLogger,
ExtendSysPath,
TestCasePlus,
execute_subprocess_async,
get_gpu_count,
mockenv_context,
require_deepspeed,
require_torch_gpu,
require_torch_multi_gpu,
slow,
)
from transformers.trainer_utils import set_seed # noqa
set_seed(42)
lowercase__ = {"""base""": """patrickvonplaten/wav2vec2_tiny_random""", """robust""": """patrickvonplaten/wav2vec2_tiny_random_robust"""}
lowercase__ = """zero2"""
lowercase__ = """zero3"""
lowercase__ = [ZEROa, ZEROa]
def _snake_case ( lowercase__ , lowercase__ , lowercase__ ):
# customize the test name generator function as we want both params to appear in the sub-test
# name, as by default it shows only the first param
_lowerCamelCase : List[str] = parameterized.to_safe_name('_'.join(str(lowercase__ ) for x in param.args ) )
return f'''{func.__name__}_{param_based_name}'''
# Cartesian-product of zero stages with models to test
lowercase__ = list(itertools.product(stages, models.keys()))
@slow
@require_deepspeed
@require_torch_gpu
class TestDeepSpeedWav2Vec2(TestCasePlus):
    @parameterized.expand(params, name_func=custom_name_func)
    def test_fp32_non_distributed(self, stage, model):
        self.run_and_check(
            stage=stage, model=model, distributed=False, fp16=False,
        )

    @require_torch_multi_gpu
    @parameterized.expand(params, name_func=custom_name_func)
    def test_fp32_distributed(self, stage, model):
        self.run_and_check(
            stage=stage, model=model, distributed=True, fp16=False,
        )

    @parameterized.expand(params, name_func=custom_name_func)
    def test_fp16_non_distributed(self, stage, model):
        self.run_and_check(
            stage=stage, model=model, distributed=False, fp16=True,
        )

    @require_torch_multi_gpu
    @parameterized.expand(params, name_func=custom_name_func)
    def test_fp16_distributed(self, stage, model):
        self.run_and_check(
            stage=stage, model=model, distributed=True, fp16=True,
        )

    def do_checks(self, output_dir):
        # XXX: run_asr is premature and doesn't save any results
        # so all we check for now is that the process didn't fail
        pass

    def run_and_check(self, stage: str, model: str, eval_steps: int = 10, distributed: bool = True, quality_checks: bool = True, fp16: bool = True):
        model_name = models[model]

        output_dir = self.run_trainer(
            stage=stage, model_name=model_name, eval_steps=eval_steps, num_train_epochs=1, distributed=distributed, fp16=fp16,
        )

        self.do_checks(output_dir)

        return output_dir

    def run_trainer(self, stage: str, model_name: str, eval_steps: int = 10, num_train_epochs: int = 1, distributed: bool = True, fp16: bool = True):
        output_dir = self.get_auto_remove_tmp_dir("./xxx", after=False)
        args = f'''
--model_name_or_path {model_name}
--dataset_name hf-internal-testing/librispeech_asr_dummy
--dataset_config_name clean
--train_split_name validation
--validation_split_name validation
--output_dir {output_dir}
            --num_train_epochs {str(num_train_epochs)}
--per_device_train_batch_size 2
--per_device_eval_batch_size 2
--evaluation_strategy steps
--learning_rate 5e-4
--warmup_steps 8
--orthography timit
--preprocessing_num_workers 1
--group_by_length
--freeze_feature_extractor
--report_to none
--save_steps 0
--eval_steps {eval_steps}
--report_to none
'''.split()
        if fp16:
            args.extend(["--fp16"])

        # currently ds_config_wav2vec2_zero.json requires "zero_optimization.find_unused_parameters": true,
        # hence the separate config files
        ds_args = f'''--deepspeed {self.test_file_dir_str}/ds_config_wav2vec2_{stage}.json'''.split()
        script = [f'''{self.examples_dir_str}/research_projects/wav2vec2/run_asr.py''']
        launcher = self.get_launcher(distributed)

        cmd = launcher + script + args + ds_args
        # keep for quick debug
        # print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] + cmd)); die
        execute_subprocess_async(cmd, env=self.get_env())

        return output_dir
    def get_launcher(self, distributed=False):
        # 1. explicitly set --num_nodes=1 just in case these tests end up run on a multi-node setup
        # - it won't be able to handle that
        # 2. for now testing with just 2 gpus max (since some quality tests may give different
        # results with more gpus because we use very little data)
        num_gpus = min(2, get_gpu_count()) if distributed else 1
        return f'''deepspeed --num_nodes 1 --num_gpus {num_gpus}'''.split()
| 12
| 1
|
def perfect_cube(n: int) -> bool:
    """Return True if non-negative n is a perfect cube.

    Rounding the float cube root guards against precision error
    (e.g. 27 ** (1 / 3) == 3.0000000000000004).
    """
    val = round(n ** (1 / 3))
    return (val * val * val) == n


if __name__ == "__main__":
    print(perfect_cube(27))
    print(perfect_cube(4))
| 68
|
'''simple docstring'''
def solution(n: int = 100) -> int:
    """Difference between the square of the sum and the sum of the squares of 1..n."""
    sum_of_squares = 0
    sum_of_ints = 0
    for i in range(1, n + 1):
        sum_of_squares += i**2
        sum_of_ints += i
    return sum_of_ints**2 - sum_of_squares
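
# NOTE (editor): worked example, not in the original. For n = 10:
# sum 1..10 = 55 and 55**2 = 3025; the sum of squares 1..10 is 385;
# so solution(10) == 3025 - 385 == 2640.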
if __name__ == "__main__":
print(f"""{solution() = }""")
| 63
| 0
|
'''simple docstring'''
import collections
import inspect
import unittest
from transformers import Swinv2Config
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
    from transformers import Swinv2ForImageClassification, Swinv2ForMaskedImageModeling, Swinv2Model
    from transformers.models.swinv2.modeling_swinv2 import SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class Swinv2ModelTester:
    def __init__(self, parent, batch_size=13, image_size=32, patch_size=2, num_channels=3, embed_dim=16, depths=[1, 2, 1], num_heads=[2, 2, 4], window_size=2, mlp_ratio=2.0, qkv_bias=True, hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, drop_path_rate=0.1, hidden_act="gelu", use_absolute_embeddings=False, patch_norm=True, initializer_range=0.02, layer_norm_eps=1e-5, is_training=True, scope=None, use_labels=True, type_sequence_label_size=10, encoder_stride=8):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.patch_norm = patch_norm
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.is_training = is_training
        self.scope = scope
        self.use_labels = use_labels
        self.type_sequence_label_size = type_sequence_label_size
        self.encoder_stride = encoder_stride
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return Swinv2Config(
            image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, embed_dim=self.embed_dim, depths=self.depths, num_heads=self.num_heads, window_size=self.window_size, mlp_ratio=self.mlp_ratio, qkv_bias=self.qkv_bias, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, drop_path_rate=self.drop_path_rate, hidden_act=self.hidden_act, use_absolute_embeddings=self.use_absolute_embeddings, patch_norm=self.patch_norm, layer_norm_eps=self.layer_norm_eps, initializer_range=self.initializer_range, encoder_stride=self.encoder_stride,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = Swinv2Model(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        expected_seq_len = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths) - 1))
        expected_dim = int(config.embed_dim * 2 ** (len(config.depths) - 1))

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, expected_seq_len, expected_dim))

    def create_and_check_for_masked_image_modeling(self, config, pixel_values, labels):
        model = Swinv2ForMaskedImageModeling(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_channels, self.image_size, self.image_size)
        )

        # test greyscale images
        config.num_channels = 1
        model = Swinv2ForMaskedImageModeling(config)
        model.to(torch_device)
        model.eval()

        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, 1, self.image_size, self.image_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = Swinv2ForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class SwinvaModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (SwinvaModel, SwinvaForImageClassification, SwinvaForMaskedImageModeling) if is_torch_available() else ()
    )
    pipeline_model_mapping = (
        {"feature-extraction": SwinvaModel, "image-classification": SwinvaForImageClassification}
        if is_torch_available()
        else {}
    )

    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = SwinvaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=SwinvaConfig, embed_dim=37)

    def test_config(self):
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    @unittest.skip(reason="Got `CUDA error: misaligned address` with PyTorch 2.0.0.")
    def test_multi_gpu_data_parallel_forward(self):
        pass

    @unittest.skip(reason="Swinv2 does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass
    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = False
            config.return_dict = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.attentions
            expected_num_attentions = len(self.model_tester.depths)
            self.assertEqual(len(attentions), expected_num_attentions)

            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            window_size_squared = config.window_size**2
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.attentions
            self.assertEqual(len(attentions), expected_num_attentions)

            self.assertListEqual(
                list(attentions[0].shape[-3:]),
                [self.model_tester.num_heads[0], window_size_squared, window_size_squared],
            )
            out_len = len(outputs)

            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            if hasattr(self.model_tester, "num_hidden_states_types"):
                added_hidden_states = self.model_tester.num_hidden_states_types
            else:
                # also another +1 for reshaped_hidden_states
                added_hidden_states = 2
            self.assertEqual(out_len + added_hidden_states, len(outputs))

            self_attentions = outputs.attentions
            self.assertEqual(len(self_attentions), expected_num_attentions)

            self.assertListEqual(
                list(self_attentions[0].shape[-3:]),
                [self.model_tester.num_heads[0], window_size_squared, window_size_squared],
            )
    def check_hidden_states_output(self, inputs_dict, config, model_class, image_size):
        model = model_class(config)
        model.to(torch_device)
        model.eval()

        with torch.no_grad():
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))

        hidden_states = outputs.hidden_states

        expected_num_layers = getattr(
            self.model_tester, "expected_num_hidden_layers", len(self.model_tester.depths) + 1
        )
        self.assertEqual(len(hidden_states), expected_num_layers)

        # Swinv2 has a different seq_length
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size, collections.abc.Iterable)
            else (config.patch_size, config.patch_size)
        )

        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])

        self.assertListEqual(
            list(hidden_states[0].shape[-2:]),
            [num_patches, self.model_tester.embed_dim],
        )

        reshaped_hidden_states = outputs.reshaped_hidden_states
        self.assertEqual(len(reshaped_hidden_states), expected_num_layers)

        batch_size, num_channels, height, width = reshaped_hidden_states[0].shape
        reshaped_hidden_states = (
            reshaped_hidden_states[0].view(batch_size, num_channels, height * width).permute(0, 2, 1)
        )
        self.assertListEqual(
            list(reshaped_hidden_states.shape[-2:]),
            [num_patches, self.model_tester.embed_dim],
        )
    def test_hidden_states_output(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size, collections.abc.Iterable)
            else (self.model_tester.image_size, self.model_tester.image_size)
        )

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            self.check_hidden_states_output(inputs_dict, config, model_class, image_size)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            self.check_hidden_states_output(inputs_dict, config, model_class, image_size)

    def test_hidden_states_output_with_padding(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.patch_size = 3

        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size, collections.abc.Iterable)
            else (self.model_tester.image_size, self.model_tester.image_size)
        )
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size, collections.abc.Iterable)
            else (config.patch_size, config.patch_size)
        )

        padded_height = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
        padded_width = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            self.check_hidden_states_output(inputs_dict, config, model_class, (padded_height, padded_width))

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            self.check_hidden_states_output(inputs_dict, config, model_class, (padded_height, padded_width))
    def test_for_masked_image_modeling(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = SwinvaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_initialization(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        configs_no_init = _config_zero_init(config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            for name, param in model.named_parameters():
                if "embeddings" not in name and "logit_scale" not in name and param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item(),
                        [0.0, 1.0],
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )
@require_vision
@require_torch
class SwinvaModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            AutoImageProcessor.from_pretrained("microsoft/swinv2-tiny-patch4-window8-256")
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = SwinvaForImageClassification.from_pretrained("microsoft/swinv2-tiny-patch4-window8-256").to(
            torch_device
        )
        image_processor = self.default_image_processor

        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([-0.3947, -0.4306, 0.0026]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
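# Usage sketch (illustrative; assumes the standard transformers test layout):
#   python -m pytest tests/models/swinv2/test_modeling_swinv2.py -k "test_model"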
| 227
|
'''simple docstring'''
# This code is adapted from OpenAI's release
# https://github.com/openai/human-eval/blob/master/human_eval/execution.py
import contextlib
import faulthandler
import io
import multiprocessing
import os
import platform
import signal
import tempfile
def check_correctness(check_program, timeout, task_id, completion_id):
    """Run a candidate program in a subprocess and report whether it passed."""
    manager = multiprocessing.Manager()
    result = manager.list()

    p = multiprocessing.Process(target=unsafe_execute, args=(check_program, result, timeout))
    p.start()
    p.join(timeout=timeout + 1)
    if p.is_alive():
        p.kill()

    if not result:
        result.append("timed out")

    return {
        "task_id": task_id,
        "passed": result[0] == "passed",
        "result": result[0],
        "completion_id": completion_id,
    }


def unsafe_execute(check_program, result, timeout):
    with create_tempdir():
        # These system calls are needed when cleaning up tempdir.
        import os
        import shutil

        rmtree = shutil.rmtree
        rmdir = os.rmdir
        chdir = os.chdir

        # Disable functionalities that can make destructive changes to the test.
        reliability_guard()

        # Run program.
        try:
            exec_globals = {}
            with swallow_io():
                with time_limit(timeout):
                    exec(check_program, exec_globals)
            result.append("passed")
        except TimeoutException:
            result.append("timed out")
        except BaseException as e:
            result.append(f"failed: {e}")

        # Needed for cleaning up.
        shutil.rmtree = rmtree
        os.rmdir = rmdir
        os.chdir = chdir
@contextlib.contextmanager
def time_limit(seconds):
    def signal_handler(signum, frame):
        raise TimeoutException("Timed out!")

    signal.setitimer(signal.ITIMER_REAL, seconds)
    signal.signal(signal.SIGALRM, signal_handler)
    try:
        yield
    finally:
        signal.setitimer(signal.ITIMER_REAL, 0)


@contextlib.contextmanager
def swallow_io():
    stream = WriteOnlyStringIO()
    with contextlib.redirect_stdout(stream):
        with contextlib.redirect_stderr(stream):
            with redirect_stdin(stream):
                yield


@contextlib.contextmanager
def create_tempdir():
    with tempfile.TemporaryDirectory() as dirname:
        with chdir(dirname):
            yield dirname


class TimeoutException(Exception):
    pass


class WriteOnlyStringIO(io.StringIO):
    """StringIO that throws an exception when it's read from."""

    def read(self, *args, **kwargs):
        raise OSError

    def readline(self, *args, **kwargs):
        raise OSError

    def readlines(self, *args, **kwargs):
        raise OSError

    def readable(self, *args, **kwargs):
        """Returns True if the IO object can be read."""
        return False


class redirect_stdin(contextlib._RedirectStream):  # type: ignore
    _stream = "stdin"
@contextlib.contextmanager
def chdir(root):
    if root == ".":
        yield
        return
    cwd = os.getcwd()
    os.chdir(root)
    try:
        yield
    except BaseException as exc:
        raise exc
    finally:
        os.chdir(cwd)
def reliability_guard(maximum_memory_bytes=None):
    """
    Disables destructive functions so the untrusted program cannot interfere
    with the test host (e.g. fork bombs, killing processes, deleting files).
    WARNING: this is not a security sandbox; run it inside proper isolation.
    """
    if maximum_memory_bytes is not None:
        import resource

        resource.setrlimit(resource.RLIMIT_AS, (maximum_memory_bytes, maximum_memory_bytes))
        resource.setrlimit(resource.RLIMIT_DATA, (maximum_memory_bytes, maximum_memory_bytes))
        if not platform.uname().system == "Darwin":
            resource.setrlimit(resource.RLIMIT_STACK, (maximum_memory_bytes, maximum_memory_bytes))

    faulthandler.disable()

    import builtins

    builtins.exit = None
    builtins.quit = None

    import os

    os.environ["OMP_NUM_THREADS"] = "1"

    os.kill = None
    os.system = None
    os.putenv = None
    os.remove = None
    os.removedirs = None
    os.rmdir = None
    os.fchdir = None
    os.setuid = None
    os.fork = None
    os.forkpty = None
    os.killpg = None
    os.rename = None
    os.renames = None
    os.truncate = None
    os.replace = None
    os.unlink = None
    os.fchmod = None
    os.fchown = None
    os.chmod = None
    os.chown = None
    os.chroot = None
    os.lchflags = None
    os.lchmod = None
    os.lchown = None
    os.getcwd = None
    os.chdir = None

    import shutil

    shutil.rmtree = None
    shutil.move = None
    shutil.chown = None

    import subprocess

    subprocess.Popen = None  # type: ignore

    __builtins__["help"] = None

    import sys

    sys.modules["ipdb"] = None
    sys.modules["joblib"] = None
    sys.modules["resource"] = None
    sys.modules["psutil"] = None
    sys.modules["tkinter"] = None
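# Minimal usage sketch (illustrative values; needs a POSIX system because the
# timeout relies on signal.setitimer): score one candidate program against its
# embedded test with a 3-second budget.
if __name__ == "__main__":
    demo_program = "def add(a, b):\n    return a + b\n\nassert add(1, 2) == 3\n"
    print(check_correctness(demo_program, timeout=3.0, task_id="demo/0", completion_id=0))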
| 227
| 1
|
import json
import os
import unittest
from transformers import BatchEncoding, MvpTokenizer, MvpTokenizerFast
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin, filter_roberta_detectors
@require_tokenizers
class TestTokenizationMvp(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = MvpTokenizer
    rust_tokenizer_class = MvpTokenizerFast
    test_rust_tokenizer = True
    from_pretrained_filter = filter_roberta_detectors

    def setUp(self):
        super().setUp()
        vocab = [
            "l", "o", "w", "e", "r", "s", "t", "i", "d", "n",
            "\u0120", "\u0120l", "\u0120n", "\u0120lo", "\u0120low",
            "er", "\u0120lowest", "\u0120newer", "\u0120wider",
            "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))
    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.rust_tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        return "lower newer", "lower newer"

    @cached_property
    def default_tokenizer(self):
        return MvpTokenizer.from_pretrained("RUCAIBox/mvp")

    @cached_property
    def default_tokenizer_fast(self):
        return MvpTokenizerFast.from_pretrained("RUCAIBox/mvp")
    @require_torch
    def test_prepare_batch(self):
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        expected_src_tokens = [0, 250, 251, 17818, 13, 39186, 1938, 4, 2]

        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(src_text, max_length=len(expected_src_tokens), padding=True, return_tensors="pt")
            self.assertIsInstance(batch, BatchEncoding)

            self.assertEqual((2, 9), batch.input_ids.shape)
            self.assertEqual((2, 9), batch.attention_mask.shape)
            result = batch.input_ids.tolist()[0]
            self.assertListEqual(expected_src_tokens, result)
            # Test that special tokens are reset

    @require_torch
    def test_prepare_batch_empty_target_text(self):
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(src_text, padding=True, return_tensors="pt")
            # check if input_ids are returned and no labels
            self.assertIn("input_ids", batch)
            self.assertIn("attention_mask", batch)
            self.assertNotIn("labels", batch)
            self.assertNotIn("decoder_attention_mask", batch)

    @require_torch
    def test_tokenizer_as_target_length(self):
        tgt_text = [
            "Summary of the text.",
            "Another summary.",
        ]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            targets = tokenizer(text_target=tgt_text, max_length=32, padding="max_length", return_tensors="pt")
            self.assertEqual(32, targets["input_ids"].shape[1])

    @require_torch
    def test_prepare_batch_not_longer_than_maxlen(self):
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(
                ["I am a small frog" * 1024, "I am a small frog"], padding=True, truncation=True, return_tensors="pt"
            )
            self.assertIsInstance(batch, BatchEncoding)
            self.assertEqual(batch.input_ids.shape, (2, 1024))
    @require_torch
    def test_special_tokens(self):
        src_text = ["A long paragraph for summarization."]
        tgt_text = [
            "Summary of the text.",
        ]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            inputs = tokenizer(src_text, text_target=tgt_text, return_tensors="pt")
            input_ids = inputs["input_ids"]
            labels = inputs["labels"]
            self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item())
            self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item())
            self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item())
            self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item())

    def test_pretokenized_inputs(self):
        pass

    def test_embeded_special_tokens(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                sentence = "A, <mask> AllenNLP sentence."
                tokens_r = tokenizer_r.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)
                tokens_p = tokenizer_p.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)

                # token_type_ids should put 0 everywhere
                self.assertEqual(sum(tokens_r["token_type_ids"]), sum(tokens_p["token_type_ids"]))

                # attention_mask should put 1 everywhere, so sum over length should be 1
                self.assertEqual(
                    sum(tokens_r["attention_mask"]) / len(tokens_r["attention_mask"]),
                    sum(tokens_p["attention_mask"]) / len(tokens_p["attention_mask"]),
                )

                tokens_r_str = tokenizer_r.convert_ids_to_tokens(tokens_r["input_ids"])
                tokens_p_str = tokenizer_p.convert_ids_to_tokens(tokens_p["input_ids"])

                # Rust correctly handles the space before the mask while python doesnt
                self.assertSequenceEqual(tokens_p["input_ids"], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])
                self.assertSequenceEqual(tokens_r["input_ids"], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])

                self.assertSequenceEqual(
                    tokens_p_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"]
                )
                self.assertSequenceEqual(
                    tokens_r_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"]
                )
| 48
|
'''simple docstring'''
INSTALL_CONTENT = """
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and
# uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
"""

notebook_first_cells = [{"type": "code", "content": INSTALL_CONTENT}]
black_avoid_patterns = {
    "{processor_class}": "FakeProcessorClass",
    "{model_class}": "FakeModelClass",
    "{object_class}": "FakeObjectClass",
}
| 223
| 0
|
"""simple docstring"""
import argparse
import json
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import AutoImageProcessor, SwinConfig, SwinForImageClassification
def get_swin_config(swin_name):
    config = SwinConfig()
    name_split = swin_name.split("_")

    model_size = name_split[1]
    img_size = int(name_split[4])
    window_size = int(name_split[3][-1])

    if model_size == "tiny":
        embed_dim = 96
        depths = (2, 2, 6, 2)
        num_heads = (3, 6, 12, 24)
    elif model_size == "small":
        embed_dim = 96
        depths = (2, 2, 18, 2)
        num_heads = (3, 6, 12, 24)
    elif model_size == "base":
        embed_dim = 128
        depths = (2, 2, 18, 2)
        num_heads = (4, 8, 16, 32)
    else:
        embed_dim = 192
        depths = (2, 2, 18, 2)
        num_heads = (6, 12, 24, 48)

    if "in22k" in swin_name:
        num_classes = 21841
    else:
        num_classes = 1000

    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    config.image_size = img_size
    config.num_labels = num_classes
    config.embed_dim = embed_dim
    config.depths = depths
    config.num_heads = num_heads
    config.window_size = window_size

    return config
def rename_key(name):
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "embeddings.norm")
    if "layers" in name:
        name = "encoder." + name
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name:
        name = name.replace("attn", "attention.self")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")

    if name == "norm.weight":
        name = "layernorm.weight"
    if name == "norm.bias":
        name = "layernorm.bias"

    if "head" in name:
        name = name.replace("head", "classifier")
    else:
        name = "swin." + name

    return name
def convert_state_dict(orig_state_dict, model):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "mask" in key:
            continue
        elif "qkv" in key:
            key_split = key.split(".")
            layer_num = int(key_split[1])
            block_num = int(key_split[3])
            dim = model.swin.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size

            if "weight" in key:
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.weight"
                ] = val[:dim, :]
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.weight"
                ] = val[dim : dim * 2, :]
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.weight"
                ] = val[-dim:, :]
            else:
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.bias"
                ] = val[:dim]
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.bias"
                ] = val[dim : dim * 2]
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.bias"
                ] = val[-dim:]
        else:
            orig_state_dict[rename_key(key)] = val

    return orig_state_dict
def convert_swin_checkpoint(swin_name, pytorch_dump_folder_path):
    timm_model = timm.create_model(swin_name, pretrained=True)
    timm_model.eval()

    config = get_swin_config(swin_name)
    model = SwinForImageClassification(config)
    model.eval()

    new_state_dict = convert_state_dict(timm_model.state_dict(), model)
    model.load_state_dict(new_state_dict)

    url = "http://images.cocodataset.org/val2017/000000039769.jpg"

    image_processor = AutoImageProcessor.from_pretrained("microsoft/{}".format(swin_name.replace("_", "-")))
    image = Image.open(requests.get(url, stream=True).raw)
    inputs = image_processor(images=image, return_tensors="pt")

    timm_outs = timm_model(inputs["pixel_values"])
    hf_outs = model(**inputs).logits

    assert torch.allclose(timm_outs, hf_outs, atol=1e-3)

    print(f"Saving model {swin_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)

    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--swin_name''',
default='''swin_tiny_patch4_window7_224''',
type=str,
help='''Name of the Swin timm model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
    args = parser.parse_args()
convert_swin_checkpoint(args.swin_name, args.pytorch_dump_folder_path)
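# Example invocation (illustrative; the script name follows the repo convention):
#   python convert_swin_timm_to_pytorch.py --swin_name swin_tiny_patch4_window7_224 \
#       --pytorch_dump_folder_path ./swin-tiny-converted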
| 195
|
"""simple docstring"""
def binomial_coefficient(n, r):
    # Compute nCr with a single-row (space-optimised) Pascal's-triangle DP.
    c = [0 for i in range(r + 1)]
    # nc0 = 1
    c[0] = 1
    for i in range(1, n + 1):
        # to compute current row from previous row, updating right to left.
        j = min(i, r)
        while j > 0:
            c[j] += c[j - 1]
            j -= 1
    return c[r]


print(binomial_coefficient(n=10, r=5))
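# Sanity checks: C(10, 5) = 252 and C(n, 0) = 1 for any n.
assert binomial_coefficient(10, 5) == 252
assert binomial_coefficient(7, 0) == 1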
| 195
| 1
|
def prime_sieve_eratosthenes(num: int) -> list[int]:
    """
    Return the primes up to and including num using the sieve of Eratosthenes.

    >>> prime_sieve_eratosthenes(10)
    [2, 3, 5, 7]
    >>> prime_sieve_eratosthenes(20)
    [2, 3, 5, 7, 11, 13, 17, 19]
    """
    if num <= 0:
        raise ValueError('Input must be a positive integer')
    primes = [True] * (num + 1)
    p = 2
    while p * p <= num:
        if primes[p]:
            # Mark every multiple of p, starting at p * p, as composite.
            for i in range(p * p, num + 1, p):
                primes[i] = False
        p += 1
    return [prime for prime in range(2, num + 1) if primes[prime]]
if __name__ == "__main__":
import doctest
doctest.testmod()
    user_num = int(input("Enter a positive integer: ").strip())
print(prime_sieve_eratosthenes(user_num))
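    # The sieve runs in O(n log log n); as a quick check, there are 25 primes below 100.
    assert len(prime_sieve_eratosthenes(100)) == 25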
| 21
|
"""simple docstring"""
import argparse
import os
import shutil
import torch
from emmental.modules import MagnitudeBinarizer, ThresholdBinarizer, TopKBinarizer
def main(args):
    pruning_method = args.pruning_method
    threshold = args.threshold

    model_name_or_path = args.model_name_or_path.rstrip("/")
    target_model_path = args.target_model_path

    print(f"Load fine-pruned model from {model_name_or_path}")
    model = torch.load(os.path.join(model_name_or_path, "pytorch_model.bin"))
    pruned_model = {}

    for name, tensor in model.items():
        if "embeddings" in name or "LayerNorm" in name or "pooler" in name:
            pruned_model[name] = tensor
            print(f"Copied layer {name}")
        elif "classifier" in name or "qa_output" in name:
            pruned_model[name] = tensor
            print(f"Copied layer {name}")
        elif "bias" in name:
            pruned_model[name] = tensor
            print(f"Copied layer {name}")
        else:
            if pruning_method == "magnitude":
                mask = MagnitudeBinarizer.apply(inputs=tensor, threshold=threshold)
                pruned_model[name] = tensor * mask
                print(f"Pruned layer {name}")
            elif pruning_method == "topK":
                if "mask_scores" in name:
                    continue
                prefix_ = name[:-6]
                scores = model[f"{prefix_}mask_scores"]
                mask = TopKBinarizer.apply(scores, threshold)
                pruned_model[name] = tensor * mask
                print(f"Pruned layer {name}")
            elif pruning_method == "sigmoied_threshold":
                if "mask_scores" in name:
                    continue
                prefix_ = name[:-6]
                scores = model[f"{prefix_}mask_scores"]
                mask = ThresholdBinarizer.apply(scores, threshold, True)
                pruned_model[name] = tensor * mask
                print(f"Pruned layer {name}")
            elif pruning_method == "l0":
                if "mask_scores" in name:
                    continue
                prefix_ = name[:-6]
                scores = model[f"{prefix_}mask_scores"]
                l, r = -0.1, 1.1
                s = torch.sigmoid(scores)
                s_bar = s * (r - l) + l
                mask = s_bar.clamp(min=0.0, max=1.0)
                pruned_model[name] = tensor * mask
                print(f"Pruned layer {name}")
            else:
                raise ValueError("Unknown pruning method")

    if target_model_path is None:
        target_model_path = os.path.join(
            os.path.dirname(model_name_or_path), f"bertarized_{os.path.basename(model_name_or_path)}"
        )

    if not os.path.isdir(target_model_path):
        shutil.copytree(model_name_or_path, target_model_path)
        print(f"\nCreated folder {target_model_path}")

    torch.save(pruned_model, os.path.join(target_model_path, "pytorch_model.bin"))
    print("\nPruned model saved! See you later!")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"--pruning_method",
choices=["l0", "magnitude", "topK", "sigmoied_threshold"],
type=str,
required=True,
help=(
"Pruning Method (l0 = L0 regularization, magnitude = Magnitude pruning, topK = Movement pruning,"
" sigmoied_threshold = Soft movement pruning)"
),
)
parser.add_argument(
"--threshold",
type=float,
required=False,
help=(
"For `magnitude` and `topK`, it is the level of remaining weights (in %) in the fine-pruned model."
"For `sigmoied_threshold`, it is the threshold \tau against which the (sigmoied) scores are compared."
"Not needed for `l0`"
),
)
parser.add_argument(
"--model_name_or_path",
type=str,
required=True,
help="Folder containing the model that was previously fine-pruned",
)
parser.add_argument(
"--target_model_path",
default=None,
type=str,
required=False,
help="Folder containing the model that was previously fine-pruned",
)
    args = parser.parse_args()
main(args)
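# Example invocation (illustrative paths; `bertarize.py` is the usual script name):
#   python bertarize.py --pruning_method topK --threshold 0.10 \
#       --model_name_or_path ./serialization_dir/fine_pruned_model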
| 77
| 0
|
"""simple docstring"""
def equated_monthly_installments(principal: float, rate_per_annum: float, years_to_repay: int) -> float:
    """
    Amortization formula: A = p * r * (1 + r)^n / ((1 + r)^n - 1), where p is the
    principal, r the interest rate per month and n the number of monthly payments.
    """
    if principal <= 0:
        raise Exception("Principal borrowed must be > 0")
    if rate_per_annum < 0:
        raise Exception("Rate of interest must be >= 0")
    if years_to_repay <= 0 or not isinstance(years_to_repay, int):
        raise Exception("Years to repay must be an integer > 0")

    # Yearly rate is divided by 12 to get monthly rate
    rate_per_month = rate_per_annum / 12

    # Years to repay is multiplied by 12 to get number of payments as payment is monthly
    number_of_payments = years_to_repay * 12

    return (
        principal
        * rate_per_month
        * (1 + rate_per_month) ** number_of_payments
        / ((1 + rate_per_month) ** number_of_payments - 1)
    )
if __name__ == "__main__":
import doctest
doctest.testmod()
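    # Worked example: a 25,000 loan at 12% nominal annual interest over 2 years
    # gives rate_per_month = 0.01 and 24 payments, i.e. an EMI of about 1176.84.
    assert abs(equated_monthly_installments(25_000, 0.12, 2) - 1176.84) < 0.01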
| 326
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    "configuration_funnel": ["FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP", "FunnelConfig"],
    "convert_funnel_original_tf_checkpoint_to_pytorch": [],
    "tokenization_funnel": ["FunnelTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_funnel_fast"] = ["FunnelTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_funnel"] = [
"FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST",
"FunnelBaseModel",
"FunnelForMaskedLM",
"FunnelForMultipleChoice",
"FunnelForPreTraining",
"FunnelForQuestionAnswering",
"FunnelForSequenceClassification",
"FunnelForTokenClassification",
"FunnelModel",
"FunnelPreTrainedModel",
"load_tf_weights_in_funnel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_funnel"] = [
"TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFFunnelBaseModel",
"TFFunnelForMaskedLM",
"TFFunnelForMultipleChoice",
"TFFunnelForPreTraining",
"TFFunnelForQuestionAnswering",
"TFFunnelForSequenceClassification",
"TFFunnelForTokenClassification",
"TFFunnelModel",
"TFFunnelPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_funnel import FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP, FunnelConfig
from .tokenization_funnel import FunnelTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_funnel_fast import FunnelTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_funnel import (
FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
FunnelBaseModel,
FunnelForMaskedLM,
FunnelForMultipleChoice,
FunnelForPreTraining,
FunnelForQuestionAnswering,
FunnelForSequenceClassification,
FunnelForTokenClassification,
FunnelModel,
FunnelPreTrainedModel,
load_tf_weights_in_funnel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_funnel import (
TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFFunnelBaseModel,
TFFunnelForMaskedLM,
TFFunnelForMultipleChoice,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForSequenceClassification,
TFFunnelForTokenClassification,
TFFunnelModel,
TFFunnelPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
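# Usage note: because the module object is swapped for a _LazyModule, a statement
# like `from transformers.models.funnel import FunnelTokenizer` only loads
# `tokenization_funnel` on first attribute access, keeping the top-level import cheap.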
| 326
| 1
|
# A Bipartite Graph is a graph whose vertices can be divided into two independent
# sets, U and V, such that every edge (u, v) connects a vertex from U to V or a
# vertex from V to U. In other words, for every edge (u, v), either u belongs to U
# and v to V, or u belongs to V and v to U. Equivalently, no edge connects two
# vertices of the same set.
def check_bipartite_dfs(graph):
    # Two-color the graph via DFS; it is bipartite iff no edge ends up with
    # both endpoints the same color.
    visited = [False] * len(graph)
    color = [-1] * len(graph)

    def dfs(v, c):
        visited[v] = True
        color[v] = c
        for u in graph[v]:
            if not visited[u]:
                dfs(u, 1 - c)

    for i in range(len(graph)):
        if not visited[i]:
            dfs(i, 0)

    for i in range(len(graph)):
        for j in graph[i]:
            if color[i] == color[j]:
                return False

    return True
# Adjacency list of graph
graph = {0: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2], 4: []}
print(check_bipartite_dfs(graph))
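# An odd cycle is the canonical non-bipartite graph: a triangle cannot be two-colored.
triangle = {0: [1, 2], 1: [0, 2], 2: [0, 1]}
print(check_bipartite_dfs(triangle))  # False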
| 12
|
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, PNDMScheduler, StableDiffusionInpaintPipeline, UNetaDConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class StableDiffusion2InpaintPipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableDiffusionInpaintPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
    image_params = frozenset(
        []
    )  # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    image_latents_params = frozenset([])

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNetaDConditionModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=9, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32, attention_head_dim=(2, 4), use_linear_projection=True,
        )
        scheduler = PNDMScheduler(skip_prk_steps=True)
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, sample_size=128,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, hidden_act="gelu", projection_dim=512,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        # TODO: use tensor inputs instead of PIL, this is here just to leave the old expected_slices untouched
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64))
        mask_image = Image.fromarray(np.uint8(image + 4)).convert("RGB").resize((64, 64))
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": init_image,
            "mask_image": mask_image,
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs

    def test_stable_diffusion_inpaint(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionInpaintPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.4727, 0.5735, 0.3941, 0.5446, 0.5926, 0.4394, 0.5062, 0.4654, 0.4476])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)
@slow
@require_torch_gpu
class StableDiffusion2InpaintPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_stable_diffusion_inpaint_pipeline(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-inpaint/init_image.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint"
            "/yellow_cat_sitting_on_a_park_bench.npy"
        )

        model_id = "stabilityai/stable-diffusion-2-inpainting"
        pipe = StableDiffusionInpaintPipeline.from_pretrained(model_id, safety_checker=None)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        prompt = "Face of a yellow cat, high resolution, sitting on a park bench"

        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            mask_image=mask_image,
            generator=generator,
            output_type="np",
        )
        image = output.images[0]

        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image).max() < 9e-3
    def test_stable_diffusion_inpaint_pipeline_fp16(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-inpaint/init_image.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint"
            "/yellow_cat_sitting_on_a_park_bench_fp16.npy"
        )

        model_id = "stabilityai/stable-diffusion-2-inpainting"
        pipe = StableDiffusionInpaintPipeline.from_pretrained(
            model_id,
            torch_dtype=torch.float16,
            safety_checker=None,
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        prompt = "Face of a yellow cat, high resolution, sitting on a park bench"

        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            mask_image=mask_image,
            generator=generator,
            output_type="np",
        )
        image = output.images[0]

        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image).max() < 5e-1
    def test_stable_diffusion_pipeline_with_sequential_cpu_offloading(self):
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()

        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-inpaint/init_image.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png"
        )

        model_id = "stabilityai/stable-diffusion-2-inpainting"
        pndm = PNDMScheduler.from_pretrained(model_id, subfolder="scheduler")
        pipe = StableDiffusionInpaintPipeline.from_pretrained(
            model_id,
            safety_checker=None,
            scheduler=pndm,
            torch_dtype=torch.float16,
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing(1)
        pipe.enable_sequential_cpu_offload()

        prompt = "Face of a yellow cat, high resolution, sitting on a park bench"

        generator = torch.manual_seed(0)
        _ = pipe(
            prompt=prompt,
            image=init_image,
            mask_image=mask_image,
            generator=generator,
            num_inference_steps=2,
            output_type="np",
        )

        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 2.65 GB is allocated
        assert mem_bytes < 2.65 * 10**9
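# Minimal out-of-test sketch (illustrative; the model id matches the tests above,
# the input images are whatever PIL images you load yourself):
#   pipe = StableDiffusionInpaintPipeline.from_pretrained(
#       "stabilityai/stable-diffusion-2-inpainting", torch_dtype=torch.float16
#   ).to("cuda")
#   result = pipe(prompt="Face of a yellow cat", image=init_image, mask_image=mask_image).images[0]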
| 12
| 1
|
'''simple docstring'''
import shutil
import tempfile
import unittest
from unittest.mock import patch
from transformers import (
DefaultFlowCallback,
IntervalStrategy,
PrinterCallback,
ProgressCallback,
Trainer,
TrainerCallback,
TrainingArguments,
is_torch_available,
)
from transformers.testing_utils import require_torch
if is_torch_available():
from transformers.trainer import DEFAULT_CALLBACKS
from .test_trainer import RegressionDataset, RegressionModelConfig, RegressionPreTrainedModel
class MyTestTrainerCallback(TrainerCallback):
    "A callback that registers the events that goes through."

    def __init__(self):
        self.events = []

    def on_init_end(self, args, state, control, **kwargs):
        self.events.append("on_init_end")

    def on_train_begin(self, args, state, control, **kwargs):
        self.events.append("on_train_begin")

    def on_train_end(self, args, state, control, **kwargs):
        self.events.append("on_train_end")

    def on_epoch_begin(self, args, state, control, **kwargs):
        self.events.append("on_epoch_begin")

    def on_epoch_end(self, args, state, control, **kwargs):
        self.events.append("on_epoch_end")

    def on_step_begin(self, args, state, control, **kwargs):
        self.events.append("on_step_begin")

    def on_step_end(self, args, state, control, **kwargs):
        self.events.append("on_step_end")

    def on_evaluate(self, args, state, control, **kwargs):
        self.events.append("on_evaluate")

    def on_predict(self, args, state, control, **kwargs):
        self.events.append("on_predict")

    def on_save(self, args, state, control, **kwargs):
        self.events.append("on_save")

    def on_log(self, args, state, control, **kwargs):
        self.events.append("on_log")

    def on_prediction_step(self, args, state, control, **kwargs):
        self.events.append("on_prediction_step")
@require_torch
class TrainerCallbackTest(unittest.TestCase):
    def setUp(self):
        self.output_dir = tempfile.mkdtemp()

    def tearDown(self):
        shutil.rmtree(self.output_dir)

    def get_trainer(self, a=0, b=0, train_len=64, eval_len=64, callbacks=None, disable_tqdm=False, **kwargs):
        train_dataset = RegressionDataset(length=train_len)
        eval_dataset = RegressionDataset(length=eval_len)
        config = RegressionModelConfig(a=a, b=b)
        model = RegressionPreTrainedModel(config)

        args = TrainingArguments(self.output_dir, disable_tqdm=disable_tqdm, report_to=[], **kwargs)
        return Trainer(
            model,
            args,
            train_dataset=train_dataset,
            eval_dataset=eval_dataset,
            callbacks=callbacks,
        )

    def check_callbacks_equality(self, cbs1, cbs2):
        self.assertEqual(len(cbs1), len(cbs2))

        # Order doesn't matter
        cbs1 = sorted(cbs1, key=lambda cb: cb.__name__ if isinstance(cb, type) else cb.__class__.__name__)
        cbs2 = sorted(cbs2, key=lambda cb: cb.__name__ if isinstance(cb, type) else cb.__class__.__name__)

        for cb1, cb2 in zip(cbs1, cbs2):
            if isinstance(cb1, type) and isinstance(cb2, type):
                self.assertEqual(cb1, cb2)
            elif isinstance(cb1, type) and not isinstance(cb2, type):
                self.assertEqual(cb1, cb2.__class__)
            elif not isinstance(cb1, type) and isinstance(cb2, type):
                self.assertEqual(cb1.__class__, cb2)
            else:
                self.assertEqual(cb1, cb2)
    def get_expected_events(self, trainer):
        expected_events = ["on_init_end", "on_train_begin"]
        step = 0
        train_dl_len = len(trainer.get_eval_dataloader())
        evaluation_events = ["on_prediction_step"] * len(trainer.get_eval_dataloader()) + ["on_log", "on_evaluate"]
        for _ in range(trainer.state.num_train_epochs):
            expected_events.append("on_epoch_begin")
            for _ in range(train_dl_len):
                step += 1
                expected_events += ["on_step_begin", "on_step_end"]
                if step % trainer.args.logging_steps == 0:
                    expected_events.append("on_log")
                if trainer.args.evaluation_strategy == IntervalStrategy.STEPS and step % trainer.args.eval_steps == 0:
                    expected_events += evaluation_events.copy()
                if step % trainer.args.save_steps == 0:
                    expected_events.append("on_save")
            expected_events.append("on_epoch_end")
            if trainer.args.evaluation_strategy == IntervalStrategy.EPOCH:
                expected_events += evaluation_events.copy()
        expected_events += ["on_log", "on_train_end"]
        return expected_events
    def test_init_callback(self):
        trainer = self.get_trainer()
        expected_callbacks = DEFAULT_CALLBACKS.copy() + [ProgressCallback]
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        # Callbacks passed at init are added to the default callbacks
        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback])
        expected_callbacks.append(MyTestTrainerCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        # TrainingArguments.disable_tqdm controls if use ProgressCallback or PrinterCallback
        trainer = self.get_trainer(disable_tqdm=True)
        expected_callbacks = DEFAULT_CALLBACKS.copy() + [PrinterCallback]
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

    def test_add_remove_callback(self):
        expected_callbacks = DEFAULT_CALLBACKS.copy() + [ProgressCallback]
        trainer = self.get_trainer()

        # We can add, pop, or remove by class name
        trainer.remove_callback(DefaultFlowCallback)
        expected_callbacks.remove(DefaultFlowCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        trainer = self.get_trainer()
        cb = trainer.pop_callback(DefaultFlowCallback)
        self.assertEqual(cb.__class__, DefaultFlowCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        trainer.add_callback(DefaultFlowCallback)
        expected_callbacks.insert(0, DefaultFlowCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        # We can also add, pop, or remove by instance
        trainer = self.get_trainer()
        cb = trainer.callback_handler.callbacks[0]
        trainer.remove_callback(cb)
        expected_callbacks.remove(cb)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        trainer = self.get_trainer()
        cb1 = trainer.callback_handler.callbacks[0]
        cb2 = trainer.pop_callback(cb1)
        self.assertEqual(cb1, cb2)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        trainer.add_callback(cb1)
        expected_callbacks.insert(0, cb1)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)
    def test_event_flow(self):
        import warnings

        # XXX: for now ignore scatter_gather warnings in this test since it's not relevant to what's being tested
        warnings.simplefilter(action="ignore", category=UserWarning)

        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback])
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        # Independent log/save/eval
        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback], logging_steps=5)
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback], save_steps=5)
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback], eval_steps=5, evaluation_strategy="steps")
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback], evaluation_strategy="epoch")
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        # A bit of everything
        trainer = self.get_trainer(
            callbacks=[MyTestTrainerCallback],
            logging_steps=3,
            save_steps=10,
            eval_steps=5,
            evaluation_strategy="steps",
        )
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        # warning should be emitted for duplicated callbacks
        with patch("transformers.trainer_callback.logger.warning") as warn_mock:
            trainer = self.get_trainer(
                callbacks=[MyTestTrainerCallback, MyTestTrainerCallback],
            )
            assert str(MyTestTrainerCallback) in warn_mock.call_args[0][0]
| 364
|
'''simple docstring'''
import argparse
import os
import re
import packaging.version
PATH_TO_EXAMPLES = "examples/"
REPLACE_PATTERNS = {
    "examples": (re.compile(r'^check_min_version\("[^"]+"\)\s*$', re.MULTILINE), 'check_min_version("VERSION")\n'),
    "init": (re.compile(r'^__version__\s+=\s+"([^"]+)"\s*$', re.MULTILINE), '__version__ = "VERSION"\n'),
    "setup": (re.compile(r'^(\s*)version\s*=\s*"[^"]+",', re.MULTILINE), r'\1version="VERSION",'),
    "doc": (re.compile(r'^(\s*)release\s*=\s*"[^"]+"$', re.MULTILINE), 'release = "VERSION"\n'),
}
REPLACE_FILES = {
    "init": "src/transformers/__init__.py",
    "setup": "setup.py",
}
README_FILE = "README.md"
def update_version_in_file(fname, version, pattern):
    """Update the version in one file using a specific pattern."""
    with open(fname, "r", encoding="utf-8", newline="\n") as f:
        code = f.read()
    re_pattern, replace = REPLACE_PATTERNS[pattern]
    replace = replace.replace("VERSION", version)
    code = re_pattern.sub(replace, code)
    with open(fname, "w", encoding="utf-8", newline="\n") as f:
        f.write(code)


def update_version_in_examples(version):
    """Update the version in all examples files."""
    for folder, directories, fnames in os.walk(PATH_TO_EXAMPLES):
        # Removing some of the folders with non-actively maintained examples from the walk
        if "research_projects" in directories:
            directories.remove("research_projects")
        if "legacy" in directories:
            directories.remove("legacy")
        for fname in fnames:
            if fname.endswith(".py"):
                update_version_in_file(os.path.join(folder, fname), version, pattern="examples")


def global_version_update(version, patch=False):
    """Update the version in all needed files."""
    for pattern, fname in REPLACE_FILES.items():
        update_version_in_file(fname, version, pattern)
    if not patch:
        update_version_in_examples(version)
def UpperCamelCase ( ):
A__ = "🤗 Transformers currently provides the following architectures"
A__ = "1. Want to contribute a new model?"
with open(_lowerCamelCase , "r" , encoding="utf-8" , newline="\n" ) as f:
A__ = f.readlines()
# Find the start of the list.
A__ = 0
while not lines[start_index].startswith(_start_prompt ):
start_index += 1
start_index += 1
A__ = start_index
# Update the lines in the model list.
while not lines[index].startswith(_end_prompt ):
if lines[index].startswith("1." ):
A__ = lines[index].replace(
"https://huggingface.co/docs/transformers/main/model_doc" , "https://huggingface.co/docs/transformers/model_doc" , )
index += 1
with open(_lowerCamelCase , "w" , encoding="utf-8" , newline="\n" ) as f:
f.writelines(_lowerCamelCase )
def UpperCamelCase ( ):
with open(REPLACE_FILES["init"] , "r" ) as f:
A__ = f.read()
A__ = REPLACE_PATTERNS["init"][0].search(_lowerCamelCase ).groups()[0]
return packaging.version.parse(_lowerCamelCase )
def UpperCamelCase ( _lowerCamelCase : Dict=False ):
A__ = get_version()
if patch and default_version.is_devrelease:
raise ValueError("Can't create a patch version from the dev branch, checkout a released version!" )
if default_version.is_devrelease:
A__ = default_version.base_version
elif patch:
A__ = F"{default_version.major}.{default_version.minor}.{default_version.micro + 1}"
else:
A__ = F"{default_version.major}.{default_version.minor + 1}.0"
# Now let's ask nicely if that's the right one.
A__ = input(F"Which version are you releasing? [{default_version}]" )
if len(_lowerCamelCase ) == 0:
A__ = default_version
print(F"Updating version to {version}." )
global_version_update(_lowerCamelCase , patch=_lowerCamelCase )
if not patch:
print("Cleaning main README, don't forget to run `make fix-copies`." )
clean_main_ref_in_model_list()
def UpperCamelCase ( ):
A__ = get_version()
A__ = F"{current_version.major}.{current_version.minor + 1}.0.dev0"
A__ = current_version.base_version
# Check with the user we got that right.
A__ = input(F"Which version are we developing now? [{dev_version}]" )
if len(_lowerCamelCase ) == 0:
A__ = dev_version
print(F"Updating version to {version}." )
global_version_update(_lowerCamelCase )
print("Cleaning main README, don't forget to run `make fix-copies`." )
clean_main_ref_in_model_list()
if __name__ == "__main__":
__lowerCAmelCase : Union[str, Any] =argparse.ArgumentParser()
parser.add_argument("--post_release", action="store_true", help="Whether this is pre or post release.")
parser.add_argument("--patch", action="store_true", help="Whether or not this is a patch release.")
__lowerCAmelCase : int =parser.parse_args()
if not args.post_release:
pre_release_work(patch=args.patch)
elif args.patch:
print("Nothing to do after a patch :-)")
else:
post_release_work()
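
# --- Editor's illustration (not part of the original script) ---
# A minimal sketch of how a REPLACE_PATTERNS entry rewrites a version string;
# the sample line below is hypothetical and only exercises the "init" pattern.
def _demo_version_substitution():
    re_pattern, replace = REPLACE_PATTERNS["init"]
    sample = '__version__ = "4.26.0.dev0"\n'
    updated = re_pattern.sub(replace.replace("VERSION", "4.26.0"), sample)
    assert '"4.26.0"' in updated and "dev0" not in updated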
| 123
| 0
|
from __future__ import annotations
def generate_all_permutations(sequence: list[int | str]) -> None:
    create_state_space_tree(sequence, [], 0, [0 for i in range(len(sequence))])


def create_state_space_tree(
    sequence: list[int | str],
    current_sequence: list[int | str],
    index: int,
    index_used: list[int],
) -> None:
    """
    Creates a state space tree to iterate through each branch using DFS.
    We know that each state has exactly len(sequence) - index children.
    It terminates when it reaches the end of the given sequence.
    """
    if index == len(sequence):
        print(current_sequence)
        return

    for i in range(len(sequence)):
        if not index_used[i]:
            current_sequence.append(sequence[i])
            index_used[i] = True
            create_state_space_tree(sequence, current_sequence, index + 1, index_used)
            current_sequence.pop()
            index_used[i] = False


sequence: list[int | str] = [3, 1, 2, 4]
generate_all_permutations(sequence)

sequence_a: list[int | str] = ["A", "B", "C"]
generate_all_permutations(sequence_a)
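
# --- Editor's illustration (not part of the original script) ---
# A quick cross-check of the backtracking above against itertools.permutations;
# this sketch only counts the orderings instead of printing them.
def _demo_count_permutations():
    from itertools import permutations

    assert len(list(permutations([3, 1, 2, 4]))) == 24  # 4! orderings, same count as printed above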
| 227
|
# limitations under the License.
from typing import Optional, Tuple, Union
import torch
from diffusers import DiffusionPipeline, ImagePipelineOutput
class CustomLocalPipeline(DiffusionPipeline):
    """Minimal denoising pipeline used as a local-loading test fixture; note the extra string in its outputs."""

    def __init__(self, unet, scheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(self, batch_size=1, generator=None, num_inference_steps=50, output_type="pil", return_dict=True, **kwargs):
        image = torch.randn(
            (batch_size, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size),
            generator=generator,
        )
        image = image.to(self.device)

        # set step values
        self.scheduler.set_timesteps(num_inference_steps)

        for t in self.progress_bar(self.scheduler.timesteps):
            # 1. predict noise model_output
            model_output = self.unet(image, t).sample

            # 2. predict previous mean of image x_t-1 and add variance depending on eta
            # eta corresponds to η in paper and should be between [0, 1]
            # do x_t -> x_t-1
            image = self.scheduler.step(model_output, t, image).prev_sample

        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,), "This is a local test"

        return ImagePipelineOutput(images=image), "This is a local test"
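
# --- Editor's illustration (not part of the original file) ---
# A hedged usage sketch: DDPMScheduler and UNet2DModel come from diffusers, but
# the tiny UNet configuration below is an assumption made purely for demonstration.
def _demo_pipeline_usage():
    from diffusers import DDPMScheduler, UNet2DModel

    unet = UNet2DModel(sample_size=32, in_channels=3, out_channels=3)
    scheduler = DDPMScheduler(num_train_timesteps=1000)
    pipe = CustomLocalPipeline(unet, scheduler)
    output, message = pipe(batch_size=1, num_inference_steps=2)
    assert message == "This is a local test"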
| 227
| 1
|
"""simple docstring"""
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ViTImageProcessor, ViTMSNConfig, ViTMSNModel
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
torch.set_grad_enabled(False)
def create_rename_keys(config, base_model=False):
    rename_keys = []
    for i in range(config.num_hidden_layers):
        # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
        rename_keys.append((f"module.blocks.{i}.norm1.weight", f"vit.encoder.layer.{i}.layernorm_before.weight"))
        rename_keys.append((f"module.blocks.{i}.norm1.bias", f"vit.encoder.layer.{i}.layernorm_before.bias"))
        rename_keys.append(
            (f"module.blocks.{i}.attn.proj.weight", f"vit.encoder.layer.{i}.attention.output.dense.weight")
        )
        rename_keys.append((f"module.blocks.{i}.attn.proj.bias", f"vit.encoder.layer.{i}.attention.output.dense.bias"))
        rename_keys.append((f"module.blocks.{i}.norm2.weight", f"vit.encoder.layer.{i}.layernorm_after.weight"))
        rename_keys.append((f"module.blocks.{i}.norm2.bias", f"vit.encoder.layer.{i}.layernorm_after.bias"))
        rename_keys.append((f"module.blocks.{i}.mlp.fc1.weight", f"vit.encoder.layer.{i}.intermediate.dense.weight"))
        rename_keys.append((f"module.blocks.{i}.mlp.fc1.bias", f"vit.encoder.layer.{i}.intermediate.dense.bias"))
        rename_keys.append((f"module.blocks.{i}.mlp.fc2.weight", f"vit.encoder.layer.{i}.output.dense.weight"))
        rename_keys.append((f"module.blocks.{i}.mlp.fc2.bias", f"vit.encoder.layer.{i}.output.dense.bias"))

    # projection layer + position embeddings
    rename_keys.extend(
        [
            ("module.cls_token", "vit.embeddings.cls_token"),
            ("module.patch_embed.proj.weight", "vit.embeddings.patch_embeddings.projection.weight"),
            ("module.patch_embed.proj.bias", "vit.embeddings.patch_embeddings.projection.bias"),
            ("module.pos_embed", "vit.embeddings.position_embeddings"),
        ]
    )

    if base_model:
        # layernorm + pooler
        rename_keys.extend(
            [
                ("module.norm.weight", "layernorm.weight"),
                ("module.norm.bias", "layernorm.bias"),
            ]
        )
        # if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("vit") else pair for pair in rename_keys]
    else:
        # layernorm + classification head
        rename_keys.extend(
            [
                ("norm.weight", "vit.layernorm.weight"),
                ("norm.bias", "vit.layernorm.bias"),
                ("head.weight", "classifier.weight"),
                ("head.bias", "classifier.bias"),
            ]
        )
    return rename_keys
def read_in_q_k_v(state_dict, config, base_model=False):
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ""
        else:
            prefix = "vit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"module.blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"module.blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
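
# --- Editor's illustration (not part of the original script) ---
# A minimal sketch of the fused-QKV slicing above: a (3*hidden, hidden) matrix
# holds the query, key and value rows stacked in that order.
def _demo_qkv_slicing():
    hidden_size = 4
    qkv = torch.arange(3 * hidden_size * hidden_size, dtype=torch.float32).reshape(3 * hidden_size, hidden_size)
    q = qkv[:hidden_size, :]
    k = qkv[hidden_size : hidden_size * 2, :]
    v = qkv[-hidden_size:, :]
    assert torch.equal(torch.cat([q, k, v], dim=0), qkv)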
def remove_classification_head_(state_dict):
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)
def remove_projection_head(state_dict):
    # projection head is used in the self-supervised pre-training in MSN,
    # for downstream task it's not needed.
    ignore_keys = [
        "module.fc.fc1.weight",
        "module.fc.fc1.bias",
        "module.fc.bn1.weight",
        "module.fc.bn1.bias",
        "module.fc.bn1.running_mean",
        "module.fc.bn1.running_var",
        "module.fc.bn1.num_batches_tracked",
        "module.fc.fc2.weight",
        "module.fc.fc2.bias",
        "module.fc.bn2.weight",
        "module.fc.bn2.bias",
        "module.fc.bn2.running_mean",
        "module.fc.bn2.running_var",
        "module.fc.bn2.num_batches_tracked",
        "module.fc.fc3.weight",
        "module.fc.fc3.bias",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def convert_vit_msn_checkpoint(checkpoint_url, pytorch_dump_folder_path):
    config = ViTMSNConfig()
    config.num_labels = 1000

    repo_id = "datasets/huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    if "s16" in checkpoint_url:
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_attention_heads = 6
    elif "l16" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.hidden_dropout_prob = 0.1
    elif "b4" in checkpoint_url:
        config.patch_size = 4
    elif "l7" in checkpoint_url:
        config.patch_size = 7
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.hidden_dropout_prob = 0.1

    model = ViTMSNModel(config)

    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["target_encoder"]

    image_processor = ViTImageProcessor(size=config.image_size)

    remove_projection_head(state_dict)
    rename_keys = create_rename_keys(config, base_model=True)

    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model=True)

    model.load_state_dict(state_dict)
    model.eval()

    url = "http://images.cocodataset.org/val2017/000000039769.jpg"

    image = Image.open(requests.get(url, stream=True).raw)
    image_processor = ViTImageProcessor(
        size=config.image_size, image_mean=IMAGENET_DEFAULT_MEAN, image_std=IMAGENET_DEFAULT_STD
    )
    inputs = image_processor(images=image, return_tensors="pt")

    # forward pass
    torch.manual_seed(2)
    outputs = model(**inputs)
    last_hidden_state = outputs.last_hidden_state

    # The following Colab Notebook was used to generate these outputs:
    # https://colab.research.google.com/gist/sayakpaul/3672419a04f5997827503fd84079bdd1/scratchpad.ipynb
    if "s16" in checkpoint_url:
        expected_slice = torch.tensor([[-1.0915, -1.4876, -1.1809]])
    elif "b16" in checkpoint_url:
        expected_slice = torch.tensor([[14.2889, -18.9045, 11.7281]])
    elif "l16" in checkpoint_url:
        expected_slice = torch.tensor([[41.5028, -22.8681, 45.6475]])
    elif "b4" in checkpoint_url:
        expected_slice = torch.tensor([[-4.3868, 5.2932, -0.4137]])
    else:
        expected_slice = torch.tensor([[-0.1792, -0.6465, 2.4263]])

    # verify logits
    assert torch.allclose(last_hidden_state[:, 0, :3], expected_slice, atol=1e-4)

    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)

    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
__A : str = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--checkpoint_url',
default='https://dl.fbaipublicfiles.com/msn/vits16_800ep.pth.tar',
type=str,
help='URL of the checkpoint you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
__A : Dict = parser.parse_args()
convert_vit_msn_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
| 363
|
"""simple docstring"""
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
    UniSpeechConfig,
    UniSpeechForCTC,
    UniSpeechForPreTraining,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2PhonemeCTCTokenizer,
    Wav2Vec2Processor,
    logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

MAPPING = {
    "post_extract_proj": "feature_projection.projection",
    "encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
    "self_attn.k_proj": "encoder.layers.*.attention.k_proj",
    "self_attn.v_proj": "encoder.layers.*.attention.v_proj",
    "self_attn.q_proj": "encoder.layers.*.attention.q_proj",
    "self_attn.out_proj": "encoder.layers.*.attention.out_proj",
    "self_attn_layer_norm": "encoder.layers.*.layer_norm",
    "fc1": "encoder.layers.*.feed_forward.intermediate_dense",
    "fc2": "encoder.layers.*.feed_forward.output_dense",
    "final_layer_norm": "encoder.layers.*.final_layer_norm",
    "encoder.layer_norm": "encoder.layer_norm",
    "w2v_model.layer_norm": "feature_projection.layer_norm",
    "quantizer.weight_proj": "quantizer.weight_proj",
    "quantizer.vars": "quantizer.codevectors",
    "project_q": "project_q",
    "final_proj": "project_hid",
    "w2v_encoder.proj": "ctc_proj",
    "mask_emb": "masked_spec_embed",
}
TOP_LEVEL_KEYS = [
    "ctc_proj",
    "quantizer.weight_proj",
    "quantizer.codevectors",
    "project_q",
    "project_hid",
]
def set_recursively(hf_pointer, key, value, full_name, weight_type, is_finetuned):
    for attribute in key.split("."):
        if is_finetuned:
            if attribute in ["quantizer", "project_q", "project_hid"]:
                # those layers are only relevant for pretraining and should be dropped
                return

            if attribute == "ctc_proj":
                # we should rename `ctc_proj` to `lm_head` for fine-tuned phoneme models
                attribute = "lm_head"

        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    assert hf_shape == value.shape, (
        f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
        f" {value.shape} for {full_name}"
    )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")
def recursively_load_weights(fairseq_model, hf_model, is_finetuned):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()

    feature_extractor = hf_model.unispeech.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = "unispeech." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = "weight"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type, is_finetuned)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")
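
# --- Editor's illustration (not part of the original script) ---
# A minimal sketch of the "*" substitution in recursively_load_weights above:
# the layer index is recovered from the fairseq name and spliced into the key.
def _demo_wildcard_mapping():
    key, mapped_key = "self_attn.k_proj", "encoder.layers.*.attention.k_proj"
    name = "w2v_model.encoder.layers.3.self_attn.k_proj.weight"
    layer_index = name.split(key)[0].split(".")[-2]
    assert mapped_key.replace("*", layer_index) == "encoder.layers.3.attention.k_proj"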
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"
                " found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_unispeech_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True
):
    if config_path is not None:
        config = UniSpeechConfig.from_pretrained(config_path)
    else:
        config = UniSpeechConfig()

    if is_finetuned:
        if dict_path:
            target_dict = Dictionary.load_from_json(dict_path)

            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols)
            vocab_path = os.path.join(pytorch_dump_folder_path, "vocab.json")
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(pytorch_dump_folder_path))
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True)
            vocab_dict = target_dict.indices

            # fairseq has the <pad> and <s> switched
            vocab_dict["<pad>"] = 42
            vocab_dict["<s>"] = 43
            with open(vocab_path, "w", encoding="utf-8") as vocab_handle:
                json.dump(vocab_dict, vocab_handle)
            tokenizer = Wav2Vec2PhonemeCTCTokenizer(
                vocab_path,
                unk_token=target_dict.unk_word,
                pad_token=target_dict.pad_word,
                bos_token=target_dict.bos_word,
                eos_token=target_dict.eos_word,
                word_delimiter_token="|",
                do_lower_case=False,
            )
            return_attention_mask = True if config.feat_extract_norm == "layer" else False
            feature_extractor = Wav2Vec2FeatureExtractor(
                feature_size=1,
                sampling_rate=16_000,
                padding_value=0,
                do_normalize=True,
                return_attention_mask=return_attention_mask,
            )
            processor = Wav2Vec2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)

        hf_unispeech = UniSpeechForCTC(config)
    else:
        hf_unispeech = UniSpeechForPreTraining(config)

    if is_finetuned:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1]), "w2v_path": checkpoint_path}
        )
    else:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path])

    model = model[0].eval()

    recursively_load_weights(model, hf_unispeech, is_finetuned)

    hf_unispeech.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
__A : List[str] = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument(
'--not_finetuned', action='store_true', help='Whether the model to convert is a fine-tuned model or not'
)
__A : int = parser.parse_args()
convert_unispeech_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
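
# --- Editor's note (not part of the original script) ---
# Example invocation with hypothetical paths:
#   python convert_unispeech_checkpoint.py \
#       --checkpoint_path /path/to/unispeech.pt \
#       --pytorch_dump_folder_path ./unispeech-hf \
#       --not_finetuned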
| 57
| 0
|
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import BertTokenizer, BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import ChineseCLIPImageProcessor, ChineseCLIPProcessor
@require_vision
class ChineseCLIPProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "[PAD]",
            "[MASK]",
            "的",
            "价",
            "格",
            "是",
            "15",
            "便",
            "alex",
            "##andra",
            ",",
            "。",
            "-",
            "t",
            "shirt",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

        image_processor_map = {
            "do_resize": True,
            "size": {"height": 224, "width": 224},
            "do_center_crop": True,
            "crop_size": {"height": 18, "width": 18},
            "do_normalize": True,
            "image_mean": [0.48145466, 0.4578275, 0.40821073],
            "image_std": [0.26862954, 0.26130258, 0.27577711],
            "do_convert_rgb": True,
        }
        self.image_processor_file = os.path.join(self.tmpdirname, FEATURE_EXTRACTOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        return BertTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return BertTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return ChineseCLIPImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_default(self):
        tokenizer_slow = self.get_tokenizer()
        tokenizer_fast = self.get_rust_tokenizer()
        image_processor = self.get_image_processor()

        processor_slow = ChineseCLIPProcessor(tokenizer=tokenizer_slow, image_processor=image_processor)
        processor_slow.save_pretrained(self.tmpdirname)
        processor_slow = ChineseCLIPProcessor.from_pretrained(self.tmpdirname, use_fast=False)

        processor_fast = ChineseCLIPProcessor(tokenizer=tokenizer_fast, image_processor=image_processor)
        processor_fast.save_pretrained(self.tmpdirname)
        processor_fast = ChineseCLIPProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab())
        self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab())
        self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab())
        self.assertIsInstance(processor_slow.tokenizer, BertTokenizer)
        self.assertIsInstance(processor_fast.tokenizer, BertTokenizerFast)

        self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor_slow.image_processor, ChineseCLIPImageProcessor)
        self.assertIsInstance(processor_fast.image_processor, ChineseCLIPImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        processor = ChineseCLIPProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(cls_token="(CLS)", sep_token="(SEP)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False)

        processor = ChineseCLIPProcessor.from_pretrained(
            self.tmpdirname, cls_token="(CLS)", sep_token="(SEP)", do_normalize=False
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, BertTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, ChineseCLIPImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = ChineseCLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = ChineseCLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "Alexandra,T-shirt的价格是15便士。"

        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = ChineseCLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "Alexandra,T-shirt的价格是15便士。"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["input_ids", "token_type_ids", "attention_mask", "pixel_values"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = ChineseCLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = ChineseCLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "Alexandra,T-shirt的价格是15便士。"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), processor.model_input_names)
| 195
|
import argparse
import os
from io import BytesIO
from pathlib import Path
import requests
from clip_retrieval.clip_client import ClipClient
from PIL import Image
from tqdm import tqdm
def retrieve(class_prompt, class_data_dir, num_class_images):
    factor = 1.5
    num_images = int(factor * num_class_images)
    client = ClipClient(
        url="https://knn.laion.ai/knn-service", indice_name="laion_400m", num_images=num_images, aesthetic_weight=0.1
    )

    os.makedirs(f"{class_data_dir}/images", exist_ok=True)
    if len(list(Path(f"{class_data_dir}/images").iterdir())) >= num_class_images:
        return

    while True:
        class_images = client.query(text=class_prompt)
        if len(class_images) >= factor * num_class_images or num_images > 1e4:
            break
        else:
            num_images = int(factor * num_images)
            client = ClipClient(
                url="https://knn.laion.ai/knn-service",
                indice_name="laion_400m",
                num_images=num_images,
                aesthetic_weight=0.1,
            )

    count = 0
    total = 0
    pbar = tqdm(desc="downloading real regularization images", total=num_class_images)

    with open(f"{class_data_dir}/caption.txt", "w") as f1, open(f"{class_data_dir}/urls.txt", "w") as f2, open(
        f"{class_data_dir}/images.txt", "w"
    ) as f3:
        while total < num_class_images:
            images = class_images[count]
            count += 1
            try:
                img = requests.get(images["url"])
                if img.status_code == 200:
                    _ = Image.open(BytesIO(img.content))
                    with open(f"{class_data_dir}/images/{total}.jpg", "wb") as f:
                        f.write(img.content)
                    f1.write(images["caption"] + "\n")
                    f2.write(images["url"] + "\n")
                    f3.write(f"{class_data_dir}/images/{total}.jpg" + "\n")
                    total += 1
                    pbar.update(1)
                else:
                    continue
            except Exception:
                continue
    return


def parse_args():
    parser = argparse.ArgumentParser("", add_help=False)
    parser.add_argument("--class_prompt", help="text prompt to retrieve images", required=True, type=str)
    parser.add_argument("--class_data_dir", help="path to save images", required=True, type=str)
    parser.add_argument("--num_class_images", help="number of images to download", default=200, type=int)
    return parser.parse_args()
if __name__ == "__main__":
    args = parse_args()
retrieve(args.class_prompt, args.class_data_dir, args.num_class_images)
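
# --- Editor's note (not part of the original script) ---
# Example invocation with hypothetical values:
#   python retrieve.py --class_prompt "photo of a dog" --class_data_dir ./class_images --num_class_images 100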
| 195
| 1
|
import argparse
from pathlib import Path
import torch
from transformers import OPTConfig, OPTModel
from transformers.utils import logging
logging.set_verbosity_info()
_snake_case = logging.get_logger(__name__)
def load_checkpoint(checkpoint_path):
    """Checkpoint path should end in model.pt"""
    sd = torch.load(checkpoint_path, map_location="cpu")
    if "model" in sd.keys():
        sd = torch.load(checkpoint_path, map_location="cpu")["model"]

    # pop unnecessary weights
    keys_to_delete = [
        "decoder.version",
        "decoder.output_projection.weight",
    ]
    for key in keys_to_delete:
        if key in sd:
            sd.pop(key)

    keys_to_rename = {
        "decoder.project_in_dim.weight": "decoder.project_in.weight",
        "decoder.project_out_dim.weight": "decoder.project_out.weight",
        "decoder.layer_norm.weight": "decoder.final_layer_norm.weight",
        "decoder.layer_norm.bias": "decoder.final_layer_norm.bias",
    }
    for old_key, new_key in keys_to_rename.items():
        if old_key in sd:
            sd[new_key] = sd.pop(old_key)

    keys = list(sd.keys())
    for key in keys:
        if ".qkv_proj." in key:
            value = sd[key]
            # We split QKV in separate Q,K,V
            q_name = key.replace(".qkv_proj.", ".q_proj.")
            k_name = key.replace(".qkv_proj.", ".k_proj.")
            v_name = key.replace(".qkv_proj.", ".v_proj.")

            depth = value.shape[0]
            assert depth % 3 == 0
            # `SequeuceParallelTransformerBlock` has QKV weight is separated in K,V,Q despite the naming:
            # https://cs.github.com/facebookresearch/metaseq/blob/51871bd73cd04c038f239ea2a26db1d7f6b37927/metaseq/modules/sequence_parallel_transformer_layer.py#L97
            k, v, q = torch.split(value, depth // 3, dim=0)

            sd[q_name] = q
            sd[k_name] = k
            sd[v_name] = v
            del sd[key]

    return sd
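
# --- Editor's illustration (not part of the original script) ---
# A minimal sketch of the fused-QKV split above: torch.split cuts a (3*d, d)
# matrix into three (d, d) blocks along dim 0.
def _demo_qkv_split():
    fused = torch.arange(12 * 4, dtype=torch.float32).reshape(12, 4)
    k, v, q = torch.split(fused, fused.shape[0] // 3, dim=0)
    assert q.shape == k.shape == v.shape == (4, 4)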
@torch.no_grad()
def convert_opt_checkpoint(checkpoint_path, pytorch_dump_folder_path, config=None):
    state_dict = load_checkpoint(checkpoint_path)

    if config is not None:
        config = OPTConfig.from_pretrained(config)
    else:
        config = OPTConfig()

    model = OPTModel(config).half().eval()
    model.load_state_dict(state_dict)

    # Check results
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--fairseq_path",
type=str,
help=(
"path to fairseq checkpoint in correct format. You can find all checkpoints in the correct format here:"
" https://huggingface.co/models?other=opt_metasq"
),
)
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--hf_config", default=None, type=str, help="Define HF config.")
    args = parser.parse_args()
convert_opt_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, config=args.hf_config)
| 300
|
UNIVERSAL_GAS_CONSTANT = 8.3144598


def rms_speed_of_molecule(temperature: float, molar_mass: float) -> float:
    """Return the root-mean-square speed of a molecule: vrms = sqrt(3RT/M)."""
    if temperature < 0:
        raise Exception("Temperature cannot be less than 0 K")
    if molar_mass <= 0:
        raise Exception("Molar mass cannot be less than or equal to 0 kg/mol")
    else:
        return (3 * UNIVERSAL_GAS_CONSTANT * temperature / molar_mass) ** 0.5


if __name__ == "__main__":
    import doctest

    # run doctest
    doctest.testmod()
    # example
    temperature = 300
    molar_mass = 28
    vrms = rms_speed_of_molecule(temperature, molar_mass)
print(f'''Vrms of Nitrogen gas at 300 K is {vrms} m/s''')
| 300
| 1
|
def equated_monthly_installments(principal: float, rate_per_annum: float, years_to_repay: int) -> float:
    """
    Formula for amortization amount per month:
    A = p * r * (1 + r)^n / ((1 + r)^n - 1)
    where p is the principal, r is the rate of interest per month
    and n is the number of payments
    """
    if principal <= 0:
        raise Exception("Principal borrowed must be > 0")
    if rate_per_annum < 0:
        raise Exception("Rate of interest must be >= 0")
    if years_to_repay <= 0 or not isinstance(years_to_repay, int):
        raise Exception("Years to repay must be an integer > 0")

    # Yearly rate is divided by 12 to get monthly rate
    rate_per_month = rate_per_annum / 12

    # Years to repay is multiplied by 12 to get number of payments as payment is monthly
    number_of_payments = years_to_repay * 12

    return (
        principal
        * rate_per_month
        * (1 + rate_per_month) ** number_of_payments
        / ((1 + rate_per_month) ** number_of_payments - 1)
    )
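
# --- Editor's illustration (not part of the original file) ---
# A rough worked example: 10,000 borrowed at 12% per annum over 10 years gives
# a monthly rate of 0.01 and 120 payments, i.e. an EMI of roughly 143.47.
def _demo_emi():
    emi = equated_monthly_installments(10000, 0.12, 10)
    assert abs(emi - 143.47) < 0.05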
if __name__ == "__main__":
import doctest
doctest.testmod()
| 326
|
import os
from argparse import ArgumentParser
from typing import List
import torch.utils.data
from datasets import Dataset, IterableDataset
from datasets.distributed import split_dataset_by_node
NUM_SHARDS = 4
NUM_ITEMS_PER_SHARD = 3


class FailedTestError(RuntimeError):
    pass


def gen(shards: List[str]):
    for shard in shards:
        for i in range(NUM_ITEMS_PER_SHARD):
            yield {"i": i, "shard": shard}


def main():
    rank = int(os.environ["RANK"])
    world_size = int(os.environ["WORLD_SIZE"])

    parser = ArgumentParser()
    parser.add_argument("--streaming", type=bool)
    parser.add_argument("--local_rank", type=int)
    parser.add_argument("--num_workers", type=int, default=0)
    args = parser.parse_args()
    streaming = args.streaming
    num_workers = args.num_workers

    gen_kwargs = {"shards": [f"shard_{shard_idx}" for shard_idx in range(NUM_SHARDS)]}
    ds = IterableDataset.from_generator(gen, gen_kwargs=gen_kwargs)
    if not streaming:
        ds = Dataset.from_list(list(ds))

    ds = split_dataset_by_node(ds, rank=rank, world_size=world_size)
    dataloader = torch.utils.data.DataLoader(ds, num_workers=num_workers)

    full_size = NUM_SHARDS * NUM_ITEMS_PER_SHARD
    expected_local_size = full_size // world_size
    expected_local_size += int(rank < (full_size % world_size))

    local_size = sum(1 for _ in dataloader)
    if local_size != expected_local_size:
        raise FailedTestError(f"local_size {local_size} != expected_local_size {expected_local_size}")
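
# --- Editor's note (not part of the original script) ---
# This check is meant to run once per rank, e.g. (hypothetical launch command):
#   torchrun --nproc_per_node=2 this_script.py --num_workers 2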
if __name__ == "__main__":
main()
| 326
| 1
|
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import CLIPImageProcessor, CLIPProcessor
@require_vision
class CLIPProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        # fmt: off
        vocab = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "l o", "lo w</w>", "e r</w>", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

        image_processor_map = {
            "do_resize": True,
            "size": 20,
            "do_center_crop": True,
            "crop_size": 18,
            "do_normalize": True,
            "image_mean": [0.48145466, 0.4578275, 0.40821073],
            "image_std": [0.26862954, 0.26130258, 0.27577711],
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        return CLIPTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return CLIPImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_default(self):
        tokenizer_slow = self.get_tokenizer()
        tokenizer_fast = self.get_rust_tokenizer()
        image_processor = self.get_image_processor()

        processor_slow = CLIPProcessor(tokenizer=tokenizer_slow, image_processor=image_processor)
        processor_slow.save_pretrained(self.tmpdirname)
        processor_slow = CLIPProcessor.from_pretrained(self.tmpdirname, use_fast=False)

        processor_fast = CLIPProcessor(tokenizer=tokenizer_fast, image_processor=image_processor)
        processor_fast.save_pretrained(self.tmpdirname)
        processor_fast = CLIPProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab())
        self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab())
        self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab())
        self.assertIsInstance(processor_slow.tokenizer, CLIPTokenizer)
        self.assertIsInstance(processor_fast.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor_slow.image_processor, CLIPImageProcessor)
        self.assertIsInstance(processor_fast.image_processor, CLIPImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        processor = CLIPProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = CLIPProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, CLIPImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_image_proc = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_image_proc.keys():
            self.assertAlmostEqual(input_image_proc[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"

        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask", "pixel_values"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), processor.model_input_names)
| 101
|
'''simple docstring'''
import heapq as hq
import math
from collections.abc import Iterator
class Vertex:
    def __init__(self, id_):
        self.id = str(id_)
        self.key = None
        self.pi = None
        self.neighbors = []
        self.edges = {}  # {vertex:distance}

    def __lt__(self, other):
        return self.key < other.key

    def __repr__(self):
        return self.id

    def add_neighbor(self, vertex):
        self.neighbors.append(vertex)

    def add_edge(self, vertex, weight):
        self.edges[vertex.id] = weight


def connect(graph, a, b, edge):
    # add the neighbors:
    graph[a - 1].add_neighbor(graph[b - 1])
    graph[b - 1].add_neighbor(graph[a - 1])
    # add the edges:
    graph[a - 1].add_edge(graph[b - 1], edge)
    graph[b - 1].add_edge(graph[a - 1], edge)


def prim(graph: list, root: Vertex) -> list:
    """Prim's MST algorithm with a list-based priority queue."""
    a = []
    for u in graph:
        u.key = math.inf
        u.pi = None
    root.key = 0
    q = graph[:]
    while q:
        u = min(q)
        q.remove(u)
        for v in u.neighbors:
            if (v in q) and (u.edges[v.id] < v.key):
                v.pi = u
                v.key = u.edges[v.id]
    for i in range(1, len(graph)):
        a.append((int(graph[i].id) + 1, int(graph[i].pi.id) + 1))
    return a


def prim_heap(graph: list, root: Vertex) -> Iterator[tuple]:
    """Prim's MST algorithm with a binary min-heap."""
    for u in graph:
        u.key = math.inf
        u.pi = None
    root.key = 0

    h = list(graph)
    hq.heapify(h)

    while h:
        u = hq.heappop(h)
        for v in u.neighbors:
            if (v in h) and (u.edges[v.id] < v.key):
                v.pi = u
                v.key = u.edges[v.id]
                hq.heapify(h)

    for i in range(1, len(graph)):
        yield (int(graph[i].id) + 1, int(graph[i].pi.id) + 1)


def test_vector() -> None:
    pass
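
# --- Editor's illustration (not part of the original file) ---
# A minimal sketch building a triangle graph; a spanning tree of n vertices
# always has n - 1 edges, which is what prim() should return.
def _demo_prim():
    graph = [Vertex(i) for i in range(3)]
    connect(graph, 1, 2, 1)
    connect(graph, 2, 3, 2)
    connect(graph, 1, 3, 3)
    assert len(prim(graph, graph[0])) == len(graph) - 1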
if __name__ == "__main__":
import doctest
doctest.testmod()
| 101
| 1
|
'''simple docstring'''
import numpy as np
def power_iteration(input_matrix: np.ndarray, vector: np.ndarray, error_tol: float = 1e-12, max_iterations: int = 100):
    assert np.shape(input_matrix)[0] == np.shape(input_matrix)[1]
    # Ensure proper dimensionality.
    assert np.shape(input_matrix)[0] == np.shape(vector)[0]
    # Ensure inputs are either both complex or both real
    assert np.iscomplexobj(input_matrix) == np.iscomplexobj(vector)
    is_complex = np.iscomplexobj(input_matrix)
    if is_complex:
        # Ensure complex input_matrix is Hermitian
        assert np.array_equal(input_matrix, input_matrix.conj().T)

    # Set convergence to False. Will define convergence when we exceed max_iterations
    # or when we have small changes from one iteration to next.
    convergence = False
    lambda_previous = 0
    iterations = 0
    error = 1e12

    while not convergence:
        # Multiple matrix by the vector.
        w = np.dot(input_matrix, vector)
        # Normalize the resulting output vector.
        vector = w / np.linalg.norm(w)
        # Find rayleigh quotient
        # (faster than usual b/c we know vector is normalized already)
        vector_h = vector.conj().T if is_complex else vector.T
        lambda_ = np.dot(vector_h, np.dot(input_matrix, vector))

        # Check convergence.
        error = np.abs(lambda_ - lambda_previous) / lambda_
        iterations += 1

        if error <= error_tol or iterations >= max_iterations:
            convergence = True

        lambda_previous = lambda_

    if is_complex:
        lambda_ = np.real(lambda_)

    return lambda_, vector
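
# --- Editor's illustration (not part of the original file) ---
# A quick usage sketch: the dominant eigenvalue of [[2, 1], [1, 2]] is 3, so
# power iteration from a generic starting vector should converge to it.
def _demo_power_iteration():
    eigen_value, _ = power_iteration(np.array([[2.0, 1.0], [1.0, 2.0]]), np.array([1.0, 0.5]))
    assert abs(eigen_value - 3.0) < 1e-6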
def test_power_iteration() -> None:
    real_input_matrix = np.array([[41, 4, 20], [4, 26, 30], [20, 30, 50]])
    real_vector = np.array([41, 4, 20])
    complex_input_matrix = real_input_matrix.astype(np.complex128)
    imag_matrix = np.triu(1j * complex_input_matrix, 1)
    complex_input_matrix += imag_matrix
    complex_input_matrix += -1 * imag_matrix.T
    complex_vector = np.array([41, 4, 20]).astype(np.complex128)

    for problem_type in ["real", "complex"]:
        if problem_type == "real":
            input_matrix = real_input_matrix
            vector = real_vector
        elif problem_type == "complex":
            input_matrix = complex_input_matrix
            vector = complex_vector

        # Our implementation.
        eigen_value, eigen_vector = power_iteration(input_matrix, vector)

        # Numpy implementation.
        # Get eigenvalues and eigenvectors using built-in numpy
        # eigh (eigh used for symmetric or hermetian matrices).
        eigen_values, eigen_vectors = np.linalg.eigh(input_matrix)
        # Last eigenvalue is the maximum one.
        eigen_value_max = eigen_values[-1]
        # Last column in this matrix is eigenvector corresponding to largest eigenvalue.
        eigen_vector_max = eigen_vectors[:, -1]

        # Check our implementation and numpy gives close answers.
        assert np.abs(eigen_value - eigen_value_max) <= 1e-6
        # Take absolute values element wise of each eigenvector.
        # as they are only unique to a minus sign.
        assert np.linalg.norm(np.abs(eigen_vector) - np.abs(eigen_vector_max)) <= 1e-6
if __name__ == "__main__":
import doctest
doctest.testmod()
test_power_iteration()
| 55
|
from __future__ import annotations
def merge(input_list: list, low: int, mid: int, high: int) -> list:
    result = []
    left, right = input_list[low:mid], input_list[mid : high + 1]
    while left and right:
        result.append((left if left[0] <= right[0] else right).pop(0))
    input_list[low : high + 1] = result + left + right
    return input_list


def iter_merge_sort(input_list: list) -> list:
    if len(input_list) <= 1:
        return input_list
    input_list = list(input_list)

    # iteration for two-way merging
    p = 2
    while p <= len(input_list):
        # getting low, high and middle value for merge-sort of single list
        for i in range(0, len(input_list), p):
            low = i
            high = i + p - 1
            mid = (low + high + 1) // 2
            input_list = merge(input_list, low, mid, high)
        # final merge of last two parts
        if p * 2 >= len(input_list):
            mid = i
            input_list = merge(input_list, 0, mid, len(input_list) - 1)
            break
        p *= 2

    return input_list
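
# --- Editor's illustration (not part of the original file) ---
# A spot check for the bottom-up merge sort above.
def _demo_iter_merge_sort():
    assert iter_merge_sort([5, 9, 8, 7, 1, 2, 7]) == [1, 2, 5, 7, 7, 8, 9]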
if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    if user_input == "":
        unsorted = []
    else:
        unsorted = [int(item.strip()) for item in user_input.split(",")]
print(iter_merge_sort(unsorted))
| 123
| 0
|
'''simple docstring'''
import argparse
import os
from io import BytesIO
from pathlib import Path
import requests
from clip_retrieval.clip_client import ClipClient
from PIL import Image
from tqdm import tqdm
def retrieve(class_prompt, class_data_dir, num_class_images):
    factor = 1.5
    num_images = int(factor * num_class_images)
    client = ClipClient(
        url="https://knn.laion.ai/knn-service", indice_name="laion_400m", num_images=num_images, aesthetic_weight=0.1
    )

    os.makedirs(f"{class_data_dir}/images", exist_ok=True)
    if len(list(Path(f"{class_data_dir}/images").iterdir())) >= num_class_images:
        return

    while True:
        class_images = client.query(text=class_prompt)
        if len(class_images) >= factor * num_class_images or num_images > 1e4:
            break
        else:
            num_images = int(factor * num_images)
            client = ClipClient(
                url="https://knn.laion.ai/knn-service",
                indice_name="laion_400m",
                num_images=num_images,
                aesthetic_weight=0.1,
            )

    count = 0
    total = 0
    pbar = tqdm(desc="downloading real regularization images", total=num_class_images)

    with open(f"{class_data_dir}/caption.txt", "w") as f1, open(f"{class_data_dir}/urls.txt", "w") as f2, open(
        f"{class_data_dir}/images.txt", "w"
    ) as f3:
        while total < num_class_images:
            images = class_images[count]
            count += 1
            try:
                img = requests.get(images["url"])
                if img.status_code == 200:
                    _ = Image.open(BytesIO(img.content))
                    with open(f"{class_data_dir}/images/{total}.jpg", "wb") as f:
                        f.write(img.content)
                    f1.write(images["caption"] + "\n")
                    f2.write(images["url"] + "\n")
                    f3.write(f"{class_data_dir}/images/{total}.jpg" + "\n")
                    total += 1
                    pbar.update(1)
                else:
                    continue
            except Exception:
                continue
    return


def parse_args():
    parser = argparse.ArgumentParser("", add_help=False)
    parser.add_argument("--class_prompt", help="text prompt to retrieve images", required=True, type=str)
    parser.add_argument("--class_data_dir", help="path to save images", required=True, type=str)
    parser.add_argument("--num_class_images", help="number of images to download", default=200, type=int)
    return parser.parse_args()
if __name__ == "__main__":
    args = parse_args()
retrieve(args.class_prompt, args.class_data_dir, args.num_class_images)
| 371
|
'''simple docstring'''
def is_power_of_two(number: int) -> bool:
    """Return True if this number is a power of 2 (this version also treats 0 as one)."""
    if number < 0:
        raise ValueError("number must not be negative")
    return number & (number - 1) == 0
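
# --- Editor's illustration (not part of the original file) ---
# Spot checks for the bit trick above; note this implementation returns True for 0.
def _demo_is_power_of_two():
    assert is_power_of_two(64)
    assert not is_power_of_two(30)
    assert is_power_of_two(0)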
if __name__ == "__main__":
import doctest
doctest.testmod()
| 243
| 0
|
'''simple docstring'''
import argparse
import requests
import torch
# pip3 install salesforce-lavis
# I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis
from lavis.models import load_model_and_preprocess
from PIL import Image
from transformers import (
    AutoTokenizer,
    Blip2Config,
    Blip2ForConditionalGeneration,
    Blip2Processor,
    Blip2VisionConfig,
    BlipImageProcessor,
    OPTConfig,
    T5Config,
)
from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD
def load_demo_image():
    url = "https://storage.googleapis.com/sfr-vision-language-research/LAVIS/assets/merlion.png"
    image = Image.open(requests.get(url, stream=True).raw).convert("RGB")
    return image
def create_rename_keys(config):
    rename_keys = []
    # fmt: off

    # vision encoder
    rename_keys.append(("visual_encoder.cls_token", "vision_model.embeddings.class_embedding"))
    rename_keys.append(("visual_encoder.pos_embed", "vision_model.embeddings.position_embedding"))
    rename_keys.append(("visual_encoder.patch_embed.proj.weight", "vision_model.embeddings.patch_embedding.weight"))
    rename_keys.append(("visual_encoder.patch_embed.proj.bias", "vision_model.embeddings.patch_embedding.bias"))
    rename_keys.append(("ln_vision.weight", "vision_model.post_layernorm.weight"))
    rename_keys.append(("ln_vision.bias", "vision_model.post_layernorm.bias"))

    for i in range(config.vision_config.num_hidden_layers):
        rename_keys.append((f"visual_encoder.blocks.{i}.norm1.weight", f"vision_model.encoder.layers.{i}.layer_norm1.weight"))
        rename_keys.append((f"visual_encoder.blocks.{i}.norm1.bias", f"vision_model.encoder.layers.{i}.layer_norm1.bias"))
        rename_keys.append((f"visual_encoder.blocks.{i}.norm2.weight", f"vision_model.encoder.layers.{i}.layer_norm2.weight"))
        rename_keys.append((f"visual_encoder.blocks.{i}.norm2.bias", f"vision_model.encoder.layers.{i}.layer_norm2.bias"))
        rename_keys.append((f"visual_encoder.blocks.{i}.attn.qkv.weight", f"vision_model.encoder.layers.{i}.self_attn.qkv.weight"))
        rename_keys.append((f"visual_encoder.blocks.{i}.attn.proj.weight", f"vision_model.encoder.layers.{i}.self_attn.projection.weight",))
        rename_keys.append((f"visual_encoder.blocks.{i}.attn.proj.bias", f"vision_model.encoder.layers.{i}.self_attn.projection.bias"))
        rename_keys.append((f"visual_encoder.blocks.{i}.mlp.fc1.weight", f"vision_model.encoder.layers.{i}.mlp.fc1.weight"))
        rename_keys.append((f"visual_encoder.blocks.{i}.mlp.fc1.bias", f"vision_model.encoder.layers.{i}.mlp.fc1.bias"))
        rename_keys.append((f"visual_encoder.blocks.{i}.mlp.fc2.weight", f"vision_model.encoder.layers.{i}.mlp.fc2.weight"))
        rename_keys.append((f"visual_encoder.blocks.{i}.mlp.fc2.bias", f"vision_model.encoder.layers.{i}.mlp.fc2.bias"))

    # QFormer
    rename_keys.append(("Qformer.bert.embeddings.LayerNorm.weight", "qformer.layernorm.weight"))
    rename_keys.append(("Qformer.bert.embeddings.LayerNorm.bias", "qformer.layernorm.bias"))

    # fmt: on
    return rename_keys
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def read_in_q_v_bias(state_dict, config):
    for i in range(config.vision_config.num_hidden_layers):
        # read in original q and v biases
        q_bias = state_dict.pop(f"visual_encoder.blocks.{i}.attn.q_bias")
        v_bias = state_dict.pop(f"visual_encoder.blocks.{i}.attn.v_bias")

        # next, set bias in the state dict
        qkv_bias = torch.cat((q_bias, torch.zeros_like(q_bias, requires_grad=False), v_bias))
        state_dict[f"visual_encoder.blocks.{i}.attn.qkv.bias"] = qkv_bias
def get_blip2_config(model_name, eos_token_id):
    image_size = 364 if "coco" in model_name else 224
    vision_config = Blip2VisionConfig(image_size=image_size).to_dict()

    # make sure the models have proper bos_token_id and eos_token_id set (important for generation)
    # seems like flan-T5 models don't have bos_token_id properly set?
    if "opt-2.7b" in model_name:
        text_config = OPTConfig.from_pretrained("facebook/opt-2.7b", eos_token_id=eos_token_id).to_dict()
    elif "opt-6.7b" in model_name:
        text_config = OPTConfig.from_pretrained("facebook/opt-6.7b", eos_token_id=eos_token_id).to_dict()
    elif "t5-xl" in model_name:
        text_config = T5Config.from_pretrained("google/flan-t5-xl", dense_act_fn="gelu", bos_token_id=1).to_dict()
    elif "t5-xxl" in model_name:
        text_config = T5Config.from_pretrained("google/flan-t5-xxl", dense_act_fn="gelu", bos_token_id=1).to_dict()

    config = Blip2Config(vision_config=vision_config, text_config=text_config)

    return config, image_size
@torch.no_grad()
def __snake_case ( UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Tuple=None , UpperCAmelCase_ : Union[str, Any]=False ):
lowerCamelCase_ = (
AutoTokenizer.from_pretrained("facebook/opt-2.7b" )
if "opt" in model_name
else AutoTokenizer.from_pretrained("google/flan-t5-xl" )
)
lowerCamelCase_ = tokenizer("\n" , add_special_tokens=UpperCAmelCase_ ).input_ids[0]
lowerCamelCase_ ,lowerCamelCase_ = get_blipa_config(UpperCAmelCase_ , eos_token_id=UpperCAmelCase_ )
lowerCamelCase_ = BlipaForConditionalGeneration(UpperCAmelCase_ ).eval()
lowerCamelCase_ = {
"blip2-opt-2.7b": ("blip2_opt", "pretrain_opt2.7b"),
"blip2-opt-6.7b": ("blip2_opt", "pretrain_opt6.7b"),
"blip2-opt-2.7b-coco": ("blip2_opt", "caption_coco_opt2.7b"),
"blip2-opt-6.7b-coco": ("blip2_opt", "caption_coco_opt6.7b"),
"blip2-flan-t5-xl": ("blip2_t5", "pretrain_flant5xl"),
"blip2-flan-t5-xl-coco": ("blip2_t5", "caption_coco_flant5xl"),
"blip2-flan-t5-xxl": ("blip2_t5", "pretrain_flant5xxl"),
}
lowerCamelCase_ ,lowerCamelCase_ = model_name_to_original[model_name]
# load original model
print("Loading original model..." )
lowerCamelCase_ = "cuda" if torch.cuda.is_available() else "cpu"
lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ = load_model_and_preprocess(
name=UpperCAmelCase_ , model_type=UpperCAmelCase_ , is_eval=UpperCAmelCase_ , device=UpperCAmelCase_ )
original_model.eval()
print("Done!" )
# update state dict keys
lowerCamelCase_ = original_model.state_dict()
lowerCamelCase_ = create_rename_keys(UpperCAmelCase_ )
for src, dest in rename_keys:
rename_key(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
# some keys can be renamed efficiently
for key, val in state_dict.copy().items():
lowerCamelCase_ = state_dict.pop(UpperCAmelCase_ )
if key.startswith("Qformer.bert" ):
lowerCamelCase_ = key.replace("Qformer.bert" , "qformer" )
if "attention.self" in key:
lowerCamelCase_ = key.replace("self" , "attention" )
if "opt_proj" in key:
lowerCamelCase_ = key.replace("opt_proj" , "language_projection" )
if "t5_proj" in key:
lowerCamelCase_ = key.replace("t5_proj" , "language_projection" )
if key.startswith("opt" ):
lowerCamelCase_ = key.replace("opt" , "language" )
if key.startswith("t5" ):
lowerCamelCase_ = key.replace("t5" , "language" )
lowerCamelCase_ = val
# read in qv biases
read_in_q_v_bias(UpperCAmelCase_ , UpperCAmelCase_ )
lowerCamelCase_ ,lowerCamelCase_ = hf_model.load_state_dict(UpperCAmelCase_ , strict=UpperCAmelCase_ )
assert len(UpperCAmelCase_ ) == 0
assert unexpected_keys == ["qformer.embeddings.position_ids"]
lowerCamelCase_ = load_demo_image()
lowerCamelCase_ = vis_processors["eval"](UpperCAmelCase_ ).unsqueeze(0 ).to(UpperCAmelCase_ )
lowerCamelCase_ = tokenizer(["\n"] , return_tensors="pt" ).input_ids.to(UpperCAmelCase_ )
# create processor
lowerCamelCase_ = BlipImageProcessor(
size={"height": image_size, "width": image_size} , image_mean=UpperCAmelCase_ , image_std=UpperCAmelCase_ )
lowerCamelCase_ = BlipaProcessor(image_processor=UpperCAmelCase_ , tokenizer=UpperCAmelCase_ )
lowerCamelCase_ = processor(images=UpperCAmelCase_ , return_tensors="pt" ).pixel_values.to(UpperCAmelCase_ )
    # make sure the processor creates exactly the same pixel values
assert torch.allclose(UpperCAmelCase_ , UpperCAmelCase_ )
original_model.to(UpperCAmelCase_ )
hf_model.to(UpperCAmelCase_ )
with torch.no_grad():
if "opt" in model_name:
lowerCamelCase_ = original_model({"image": original_pixel_values, "text_input": [""]} ).logits
lowerCamelCase_ = hf_model(UpperCAmelCase_ , UpperCAmelCase_ ).logits
else:
lowerCamelCase_ = original_model(
{"image": original_pixel_values, "text_input": ["\n"], "text_output": ["\n"]} ).logits
lowerCamelCase_ = input_ids.masked_fill(input_ids == tokenizer.pad_token_id , -100 )
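            # positions holding the pad token are set to -100 so the cross-entropy loss ignores them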
lowerCamelCase_ = hf_model(UpperCAmelCase_ , UpperCAmelCase_ , labels=UpperCAmelCase_ ).logits
assert original_logits.shape == logits.shape
print("First values of original logits:" , original_logits[0, :3, :3] )
print("First values of HF logits:" , logits[0, :3, :3] )
# assert values
if model_name == "blip2-flan-t5-xl":
lowerCamelCase_ = torch.tensor(
[[-41.5850, -4.4440, -8.9922], [-47.4322, -5.9143, -1.7340]] , device=UpperCAmelCase_ )
assert torch.allclose(logits[0, :3, :3] , UpperCAmelCase_ , atol=1E-4 )
    elif model_name == "blip2-flan-t5-xl-coco":
        lowerCamelCase_ = torch.tensor(
            [[-57.0109, -9.8967, -12.6280], [-68.6578, -12.7191, -10.5065]] , device=UpperCAmelCase_ )
        assert torch.allclose(logits[0, :3, :3] , UpperCAmelCase_ , atol=1E-4 )
else:
# cast to same type
lowerCamelCase_ = logits.dtype
assert torch.allclose(original_logits.to(UpperCAmelCase_ ) , UpperCAmelCase_ , atol=1E-2 )
print("Looks ok!" )
print("Generating a caption..." )
lowerCamelCase_ = ""
lowerCamelCase_ = tokenizer(UpperCAmelCase_ , return_tensors="pt" ).input_ids.to(UpperCAmelCase_ )
lowerCamelCase_ = original_model.generate({"image": original_pixel_values} )
lowerCamelCase_ = hf_model.generate(
UpperCAmelCase_ , UpperCAmelCase_ , do_sample=UpperCAmelCase_ , num_beams=5 , max_length=30 , min_length=1 , top_p=0.9 , repetition_penalty=1.0 , length_penalty=1.0 , temperature=1 , )
print("Original generation:" , UpperCAmelCase_ )
lowerCamelCase_ = input_ids.shape[1]
lowerCamelCase_ = processor.batch_decode(outputs[:, prompt_length:] , skip_special_tokens=UpperCAmelCase_ )
lowerCamelCase_ = [text.strip() for text in output_text]
print("HF generation:" , UpperCAmelCase_ )
if pytorch_dump_folder_path is not None:
processor.save_pretrained(UpperCAmelCase_ )
hf_model.save_pretrained(UpperCAmelCase_ )
if push_to_hub:
processor.push_to_hub(F'''nielsr/{model_name}''' )
hf_model.push_to_hub(F'''nielsr/{model_name}''' )
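# Example invocation (hypothetical script name and local path; adjust to your setup):
#   python convert_blip_2_original_to_pytorch.py \
#       --model_name blip2-opt-2.7b \
#       --pytorch_dump_folder_path ./blip2-opt-2.7b-converted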
if __name__ == "__main__":
a_ : Union[str, Any] = argparse.ArgumentParser()
a_ : Optional[Any] = [
"""blip2-opt-2.7b""",
"""blip2-opt-6.7b""",
"""blip2-opt-2.7b-coco""",
"""blip2-opt-6.7b-coco""",
"""blip2-flan-t5-xl""",
"""blip2-flan-t5-xl-coco""",
"""blip2-flan-t5-xxl""",
]
parser.add_argument(
"""--model_name""",
default="""blip2-opt-2.7b""",
choices=choices,
type=str,
help="""Path to hf config.json of model to convert""",
)
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument(
"""--push_to_hub""",
action="""store_true""",
help="""Whether to push the model and processor to the hub after converting""",
)
a_ : Any = parser.parse_args()
convert_blipa_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
"""simple docstring"""
import argparse
import os
import re
import packaging.version
A : Any = "examples/"
A : Optional[Any] = {
"examples": (re.compile(R"^check_min_version\(\"[^\"]+\"\)\s*$", re.MULTILINE), "check_min_version(\"VERSION\")\n"),
"init": (re.compile(R"^__version__\s+=\s+\"([^\"]+)\"\s*$", re.MULTILINE), "__version__ = \"VERSION\"\n"),
"setup": (re.compile(R"^(\s*)version\s*=\s*\"[^\"]+\",", re.MULTILINE), R"\1version=\"VERSION\","),
"doc": (re.compile(R"^(\s*)release\s*=\s*\"[^\"]+\"$", re.MULTILINE), "release = \"VERSION\"\n"),
}
A : Optional[int] = {
"init": "src/transformers/__init__.py",
"setup": "setup.py",
}
A : List[Any] = "README.md"
def _lowerCamelCase ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
    '''Update the version string in `fname` using the regex registered under `pattern`.'''
with open(_UpperCamelCase , "r" , encoding="utf-8" , newline="\n" ) as f:
__lowerCAmelCase = f.read()
__lowerCAmelCase , __lowerCAmelCase = REPLACE_PATTERNS[pattern]
__lowerCAmelCase = replace.replace("VERSION" , _UpperCamelCase )
__lowerCAmelCase = re_pattern.sub(_UpperCamelCase , _UpperCamelCase )
with open(_UpperCamelCase , "w" , encoding="utf-8" , newline="\n" ) as f:
f.write(_UpperCamelCase )
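# For example, with pattern="init" and version="4.30.0", a line such as
#   __version__ = "4.29.0.dev0"
# is rewritten to
#   __version__ = "4.30.0"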
def _lowerCamelCase ( _UpperCamelCase ):
'''simple docstring'''
for folder, directories, fnames in os.walk(_UpperCamelCase ):
# Removing some of the folders with non-actively maintained examples from the walk
if "research_projects" in directories:
directories.remove("research_projects" )
if "legacy" in directories:
directories.remove("legacy" )
for fname in fnames:
if fname.endswith(".py" ):
update_version_in_file(os.path.join(_UpperCamelCase , _UpperCamelCase ) , _UpperCamelCase , pattern="examples" )
def _lowerCamelCase ( _UpperCamelCase , _UpperCamelCase=False ):
'''simple docstring'''
for pattern, fname in REPLACE_FILES.items():
update_version_in_file(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
if not patch:
update_version_in_examples(_UpperCamelCase )
def _lowerCamelCase ( ):
'''simple docstring'''
__lowerCAmelCase = "🤗 Transformers currently provides the following architectures"
__lowerCAmelCase = "1. Want to contribute a new model?"
with open(_UpperCamelCase , "r" , encoding="utf-8" , newline="\n" ) as f:
__lowerCAmelCase = f.readlines()
# Find the start of the list.
__lowerCAmelCase = 0
while not lines[start_index].startswith(_start_prompt ):
start_index += 1
start_index += 1
__lowerCAmelCase = start_index
# Update the lines in the model list.
while not lines[index].startswith(_end_prompt ):
if lines[index].startswith("1." ):
__lowerCAmelCase = lines[index].replace(
"https://huggingface.co/docs/transformers/main/model_doc" , "https://huggingface.co/docs/transformers/model_doc" , )
index += 1
with open(_UpperCamelCase , "w" , encoding="utf-8" , newline="\n" ) as f:
f.writelines(_UpperCamelCase )
def _lowerCamelCase ( ):
    '''Read and parse the current version from the package __init__.'''
with open(REPLACE_FILES["init"] , "r" ) as f:
__lowerCAmelCase = f.read()
__lowerCAmelCase = REPLACE_PATTERNS["init"][0].search(_UpperCamelCase ).groups()[0]
return packaging.version.parse(_UpperCamelCase )
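# e.g. a line `__version__ = "4.30.0.dev0"` parses to a Version whose
# .base_version is "4.30.0" and whose .is_devrelease is True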
def _lowerCamelCase ( _UpperCamelCase=False ):
'''simple docstring'''
__lowerCAmelCase = get_version()
if patch and default_version.is_devrelease:
raise ValueError("Can't create a patch version from the dev branch, checkout a released version!" )
if default_version.is_devrelease:
__lowerCAmelCase = default_version.base_version
elif patch:
__lowerCAmelCase = f"{default_version.major}.{default_version.minor}.{default_version.micro + 1}"
else:
__lowerCAmelCase = f"{default_version.major}.{default_version.minor + 1}.0"
# Now let's ask nicely if that's the right one.
__lowerCAmelCase = input(f"Which version are you releasing? [{default_version}]" )
if len(_UpperCamelCase ) == 0:
__lowerCAmelCase = default_version
print(f"Updating version to {version}." )
global_version_update(_UpperCamelCase , patch=_UpperCamelCase )
if not patch:
print("Cleaning main README, don't forget to run `make fix-copies`." )
clean_main_ref_in_model_list()
def _lowerCamelCase ( ):
'''simple docstring'''
__lowerCAmelCase = get_version()
__lowerCAmelCase = f"{current_version.major}.{current_version.minor + 1}.0.dev0"
__lowerCAmelCase = current_version.base_version
# Check with the user we got that right.
__lowerCAmelCase = input(f"Which version are we developing now? [{dev_version}]" )
if len(_UpperCamelCase ) == 0:
__lowerCAmelCase = dev_version
print(f"Updating version to {version}." )
global_version_update(_UpperCamelCase )
print("Cleaning main README, don't forget to run `make fix-copies`." )
clean_main_ref_in_model_list()
if __name__ == "__main__":
A : Union[str, Any] = argparse.ArgumentParser()
parser.add_argument("--post_release", action="store_true", help="Whether this is pre or post release.")
parser.add_argument("--patch", action="store_true", help="Whether or not this is a patch release.")
A : Dict = parser.parse_args()
if not args.post_release:
pre_release_work(patch=args.patch)
elif args.patch:
print("Nothing to do after a patch :-)")
else:
post_release_work()
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__magic_name__ = logging.get_logger(__name__)
__magic_name__ = {
"roberta-base": "https://huggingface.co/roberta-base/resolve/main/config.json",
"roberta-large": "https://huggingface.co/roberta-large/resolve/main/config.json",
"roberta-large-mnli": "https://huggingface.co/roberta-large-mnli/resolve/main/config.json",
"distilroberta-base": "https://huggingface.co/distilroberta-base/resolve/main/config.json",
"roberta-base-openai-detector": "https://huggingface.co/roberta-base-openai-detector/resolve/main/config.json",
"roberta-large-openai-detector": "https://huggingface.co/roberta-large-openai-detector/resolve/main/config.json",
}
class lowercase ( __snake_case ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = """roberta"""
def __init__( self , _snake_case=5_0265 , _snake_case=768 , _snake_case=12 , _snake_case=12 , _snake_case=3072 , _snake_case="gelu" , _snake_case=0.1 , _snake_case=0.1 , _snake_case=512 , _snake_case=2 , _snake_case=0.02 , _snake_case=1e-12 , _snake_case=1 , _snake_case=0 , _snake_case=2 , _snake_case="absolute" , _snake_case=True , _snake_case=None , **_snake_case , ) -> int:
"""simple docstring"""
super().__init__(pad_token_id=UpperCamelCase__ , bos_token_id=UpperCamelCase__ , eos_token_id=UpperCamelCase__ , **UpperCamelCase__ )
UpperCAmelCase = vocab_size
UpperCAmelCase = hidden_size
UpperCAmelCase = num_hidden_layers
UpperCAmelCase = num_attention_heads
UpperCAmelCase = hidden_act
UpperCAmelCase = intermediate_size
UpperCAmelCase = hidden_dropout_prob
UpperCAmelCase = attention_probs_dropout_prob
UpperCAmelCase = max_position_embeddings
UpperCAmelCase = type_vocab_size
UpperCAmelCase = initializer_range
UpperCAmelCase = layer_norm_eps
UpperCAmelCase = position_embedding_type
UpperCAmelCase = use_cache
UpperCAmelCase = classifier_dropout
class lowercase ( __snake_case ):
'''simple docstring'''
@property
def snake_case_ ( self ) -> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
if self.task == "multiple-choice":
UpperCAmelCase = {0: "batch", 1: "choice", 2: "sequence"}
else:
UpperCAmelCase = {0: "batch", 1: "sequence"}
return OrderedDict(
[
('''input_ids''', dynamic_axis),
('''attention_mask''', dynamic_axis),
] )
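# Usage sketch for the two classes above (they correspond to the upstream
# `transformers` RobertaConfig / RobertaOnnxConfig; treat the exact import path
# as an assumption of this example):
#   from transformers import RobertaConfig
#   from transformers.models.roberta.configuration_roberta import RobertaOnnxConfig
#   onnx_config = RobertaOnnxConfig(RobertaConfig(), task="multiple-choice")
#   print(onnx_config.inputs)  # input_ids/attention_mask with batch, choice, sequence axes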
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tensorflow_text_available, is_torch_available
__magic_name__ = {
"configuration_ernie": ["ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP", "ErnieConfig", "ErnieOnnxConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = [
"ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST",
"ErnieForCausalLM",
"ErnieForMaskedLM",
"ErnieForMultipleChoice",
"ErnieForNextSentencePrediction",
"ErnieForPreTraining",
"ErnieForQuestionAnswering",
"ErnieForSequenceClassification",
"ErnieForTokenClassification",
"ErnieModel",
"ErniePreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_ernie import ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP, ErnieConfig, ErnieOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ernie import (
ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST,
ErnieForCausalLM,
ErnieForMaskedLM,
ErnieForMultipleChoice,
ErnieForNextSentencePrediction,
ErnieForPreTraining,
ErnieForQuestionAnswering,
ErnieForSequenceClassification,
ErnieForTokenClassification,
ErnieModel,
ErniePreTrainedModel,
)
else:
import sys
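    # Replace this module in sys.modules with a _LazyModule proxy: submodules
    # listed in _import_structure are imported only when first accessed, which
    # keeps the top-level package import cheap.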
__magic_name__ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_lowerCAmelCase : List[Any] = {
'''configuration_rembert''': ['''REMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''RemBertConfig''', '''RemBertOnnxConfig''']
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase : Tuple = ['''RemBertTokenizer''']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase : Any = ['''RemBertTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase : List[Any] = [
'''REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''RemBertForCausalLM''',
'''RemBertForMaskedLM''',
'''RemBertForMultipleChoice''',
'''RemBertForQuestionAnswering''',
'''RemBertForSequenceClassification''',
'''RemBertForTokenClassification''',
'''RemBertLayer''',
'''RemBertModel''',
'''RemBertPreTrainedModel''',
'''load_tf_weights_in_rembert''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase : Dict = [
'''TF_REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFRemBertForCausalLM''',
'''TFRemBertForMaskedLM''',
'''TFRemBertForMultipleChoice''',
'''TFRemBertForQuestionAnswering''',
'''TFRemBertForSequenceClassification''',
'''TFRemBertForTokenClassification''',
'''TFRemBertLayer''',
'''TFRemBertModel''',
'''TFRemBertPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_rembert import REMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RemBertConfig, RemBertOnnxConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_rembert import RemBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_rembert_fast import RemBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_rembert import (
REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
RemBertForCausalLM,
RemBertForMaskedLM,
RemBertForMultipleChoice,
RemBertForQuestionAnswering,
RemBertForSequenceClassification,
RemBertForTokenClassification,
RemBertLayer,
RemBertModel,
RemBertPreTrainedModel,
load_tf_weights_in_rembert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_rembert import (
TF_REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRemBertForCausalLM,
TFRemBertForMaskedLM,
TFRemBertForMultipleChoice,
TFRemBertForQuestionAnswering,
TFRemBertForSequenceClassification,
TFRemBertForTokenClassification,
TFRemBertLayer,
TFRemBertModel,
TFRemBertPreTrainedModel,
)
else:
import sys
_lowerCAmelCase : List[Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
import torch
from diffusers import DDPMScheduler
from .test_schedulers import SchedulerCommonTest
class __magic_name__ ( lowerCamelCase__ ):
"""simple docstring"""
__UpperCamelCase = (DDPMScheduler,)
def SCREAMING_SNAKE_CASE ( self :Union[str, Any] , **snake_case :str ):
'''simple docstring'''
A_ : Dict = {
"num_train_timesteps": 1_000,
"beta_start": 0.0001,
"beta_end": 0.02,
"beta_schedule": "linear",
"variance_type": "fixed_small",
"clip_sample": True,
}
config.update(**snake_case )
return config
def SCREAMING_SNAKE_CASE ( self :int ):
'''simple docstring'''
for timesteps in [1, 5, 100, 1_000]:
self.check_over_configs(num_train_timesteps=snake_case )
def SCREAMING_SNAKE_CASE ( self :Optional[Any] ):
'''simple docstring'''
for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1] , [0.002, 0.02, 0.2, 2] ):
self.check_over_configs(beta_start=snake_case , beta_end=snake_case )
def SCREAMING_SNAKE_CASE ( self :int ):
'''simple docstring'''
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=snake_case )
def SCREAMING_SNAKE_CASE ( self :List[Any] ):
'''simple docstring'''
for variance in ["fixed_small", "fixed_large", "other"]:
self.check_over_configs(variance_type=snake_case )
def SCREAMING_SNAKE_CASE ( self :Any ):
'''simple docstring'''
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=snake_case )
def SCREAMING_SNAKE_CASE ( self :str ):
'''simple docstring'''
self.check_over_configs(thresholding=snake_case )
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(
thresholding=snake_case , prediction_type=snake_case , sample_max_value=snake_case , )
def SCREAMING_SNAKE_CASE ( self :Optional[int] ):
'''simple docstring'''
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(prediction_type=snake_case )
def SCREAMING_SNAKE_CASE ( self :List[str] ):
'''simple docstring'''
for t in [0, 500, 999]:
self.check_over_forward(time_step=snake_case )
def SCREAMING_SNAKE_CASE ( self :Optional[Any] ):
'''simple docstring'''
A_ : Tuple = self.scheduler_classes[0]
A_ : List[str] = self.get_scheduler_config()
A_ : List[str] = scheduler_class(**snake_case )
assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 0.0 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.00979 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.02 ) ) < 1e-5
def SCREAMING_SNAKE_CASE ( self :List[str] ):
'''simple docstring'''
A_ : int = self.scheduler_classes[0]
A_ : List[str] = self.get_scheduler_config()
A_ : int = scheduler_class(**snake_case )
A_ : Tuple = len(snake_case )
A_ : List[str] = self.dummy_model()
A_ : Optional[Any] = self.dummy_sample_deter
A_ : List[str] = torch.manual_seed(0 )
for t in reversed(range(snake_case ) ):
# 1. predict noise residual
A_ : Tuple = model(snake_case , snake_case )
# 2. predict previous mean of sample x_t-1
A_ : Dict = scheduler.step(snake_case , snake_case , snake_case , generator=snake_case ).prev_sample
# if t > 0:
# noise = self.dummy_sample_deter
# variance = scheduler.get_variance(t) ** (0.5) * noise
#
# sample = pred_prev_sample + variance
A_ : Optional[int] = pred_prev_sample
A_ : Tuple = torch.sum(torch.abs(snake_case ) )
A_ : str = torch.mean(torch.abs(snake_case ) )
assert abs(result_sum.item() - 258.9606 ) < 1e-2
assert abs(result_mean.item() - 0.3372 ) < 1e-3
def SCREAMING_SNAKE_CASE ( self :Dict ):
'''simple docstring'''
A_ : Optional[int] = self.scheduler_classes[0]
A_ : int = self.get_scheduler_config(prediction_type="v_prediction" )
A_ : List[str] = scheduler_class(**snake_case )
A_ : int = len(snake_case )
A_ : Dict = self.dummy_model()
A_ : str = self.dummy_sample_deter
A_ : Any = torch.manual_seed(0 )
for t in reversed(range(snake_case ) ):
# 1. predict noise residual
A_ : Optional[int] = model(snake_case , snake_case )
# 2. predict previous mean of sample x_t-1
A_ : Tuple = scheduler.step(snake_case , snake_case , snake_case , generator=snake_case ).prev_sample
# if t > 0:
# noise = self.dummy_sample_deter
# variance = scheduler.get_variance(t) ** (0.5) * noise
#
# sample = pred_prev_sample + variance
A_ : List[str] = pred_prev_sample
A_ : Optional[Any] = torch.sum(torch.abs(snake_case ) )
A_ : List[str] = torch.mean(torch.abs(snake_case ) )
assert abs(result_sum.item() - 202.0296 ) < 1e-2
assert abs(result_mean.item() - 0.2631 ) < 1e-3
def SCREAMING_SNAKE_CASE ( self :Any ):
'''simple docstring'''
A_ : str = self.scheduler_classes[0]
A_ : Optional[Any] = self.get_scheduler_config()
A_ : Dict = scheduler_class(**snake_case )
A_ : Optional[int] = [100, 87, 50, 1, 0]
scheduler.set_timesteps(timesteps=snake_case )
A_ : Optional[int] = scheduler.timesteps
for i, timestep in enumerate(snake_case ):
if i == len(snake_case ) - 1:
A_ : str = -1
else:
A_ : List[str] = timesteps[i + 1]
A_ : Optional[int] = scheduler.previous_timestep(snake_case )
A_ : List[str] = prev_t.item()
self.assertEqual(snake_case , snake_case )
def SCREAMING_SNAKE_CASE ( self :str ):
'''simple docstring'''
A_ : Optional[Any] = self.scheduler_classes[0]
A_ : int = self.get_scheduler_config()
A_ : Tuple = scheduler_class(**snake_case )
A_ : List[str] = [100, 87, 50, 51, 0]
with self.assertRaises(snake_case , msg="`custom_timesteps` must be in descending order." ):
scheduler.set_timesteps(timesteps=snake_case )
def SCREAMING_SNAKE_CASE ( self :List[Any] ):
'''simple docstring'''
A_ : Any = self.scheduler_classes[0]
A_ : Union[str, Any] = self.get_scheduler_config()
A_ : Optional[int] = scheduler_class(**snake_case )
A_ : Union[str, Any] = [100, 87, 50, 1, 0]
A_ : Optional[int] = len(snake_case )
with self.assertRaises(snake_case , msg="Can only pass one of `num_inference_steps` or `custom_timesteps`." ):
scheduler.set_timesteps(num_inference_steps=snake_case , timesteps=snake_case )
def SCREAMING_SNAKE_CASE ( self :str ):
'''simple docstring'''
A_ : Union[str, Any] = self.scheduler_classes[0]
A_ : Optional[Any] = self.get_scheduler_config()
A_ : Optional[int] = scheduler_class(**snake_case )
A_ : Optional[int] = [scheduler.config.num_train_timesteps]
with self.assertRaises(
            snake_case , msg=f"`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}" , ):
scheduler.set_timesteps(timesteps=snake_case )
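# Minimal standalone sketch of the scheduler exercised above (illustrative
# values, not part of the test suite):
#   import torch
#   from diffusers import DDPMScheduler
#   scheduler = DDPMScheduler(num_train_timesteps=1000, beta_schedule="linear")
#   sample = torch.randn(1, 3, 8, 8)        # stand-in for a noisy sample
#   noise_pred = torch.randn_like(sample)   # stand-in for the model output
#   prev = scheduler.step(noise_pred, 999, sample).prev_sample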
from math import ceil, sqrt
def __UpperCamelCase ( _A : int = 1000000 ) ->int:
"""simple docstring"""
lowerCamelCase_ =0
for outer_width in range(3 , (limit // 4) + 2 ):
if outer_width**2 > limit:
lowerCamelCase_ =max(ceil(sqrt(outer_width**2 - limit ) ) , 1 )
else:
lowerCamelCase_ =1
if (outer_width - hole_width_lower_bound) % 2:
hole_width_lower_bound += 1
answer += (outer_width - hole_width_lower_bound - 2) // 2 + 1
return answer
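# Sanity check that is easy to verify by hand: with at most eight tiles the
# only square lamina is a 3x3 square with a 1x1 hole, so solution(8) == 1.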
if __name__ == "__main__":
print(F"""{solution() = }""")
from tempfile import TemporaryDirectory
from unittest import TestCase
from unittest.mock import MagicMock, patch
from transformers import AutoModel, TFAutoModel
from transformers.onnx import FeaturesManager
from transformers.testing_utils import SMALL_MODEL_IDENTIFIER, require_tf, require_torch
@require_torch
@require_tf
class _SCREAMING_SNAKE_CASE ( lowerCAmelCase__):
def _snake_case ( self )-> Union[str, Any]:
lowerCamelCase_ =SMALL_MODEL_IDENTIFIER
lowerCamelCase_ ="""pt"""
lowerCamelCase_ ="""tf"""
def _snake_case ( self , _SCREAMING_SNAKE_CASE )-> List[str]:
lowerCamelCase_ =AutoModel.from_pretrained(self.test_model )
model_pt.save_pretrained(_SCREAMING_SNAKE_CASE )
def _snake_case ( self , _SCREAMING_SNAKE_CASE )-> Optional[Any]:
lowerCamelCase_ =TFAutoModel.from_pretrained(self.test_model , from_pt=_SCREAMING_SNAKE_CASE )
model_tf.save_pretrained(_SCREAMING_SNAKE_CASE )
def _snake_case ( self )-> Optional[Any]:
lowerCamelCase_ ="""mock_framework"""
# Framework provided - return whatever the user provides
lowerCamelCase_ =FeaturesManager.determine_framework(self.test_model , _SCREAMING_SNAKE_CASE )
self.assertEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# Local checkpoint and framework provided - return provided framework
# PyTorch checkpoint
with TemporaryDirectory() as local_pt_ckpt:
self._setup_pt_ckpt(_SCREAMING_SNAKE_CASE )
lowerCamelCase_ =FeaturesManager.determine_framework(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
self.assertEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# TensorFlow checkpoint
with TemporaryDirectory() as local_tf_ckpt:
self._setup_tf_ckpt(_SCREAMING_SNAKE_CASE )
lowerCamelCase_ =FeaturesManager.determine_framework(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
self.assertEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
def _snake_case ( self )-> Tuple:
# PyTorch checkpoint
with TemporaryDirectory() as local_pt_ckpt:
self._setup_pt_ckpt(_SCREAMING_SNAKE_CASE )
lowerCamelCase_ =FeaturesManager.determine_framework(_SCREAMING_SNAKE_CASE )
self.assertEqual(_SCREAMING_SNAKE_CASE , self.framework_pt )
# TensorFlow checkpoint
with TemporaryDirectory() as local_tf_ckpt:
self._setup_tf_ckpt(_SCREAMING_SNAKE_CASE )
lowerCamelCase_ =FeaturesManager.determine_framework(_SCREAMING_SNAKE_CASE )
self.assertEqual(_SCREAMING_SNAKE_CASE , self.framework_tf )
# Invalid local checkpoint
with TemporaryDirectory() as local_invalid_ckpt:
with self.assertRaises(_SCREAMING_SNAKE_CASE ):
lowerCamelCase_ =FeaturesManager.determine_framework(_SCREAMING_SNAKE_CASE )
def _snake_case ( self )-> Optional[Any]:
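        # TensorFlow reported unavailable -> framework resolution falls back to PyTorch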
lowerCamelCase_ =MagicMock(return_value=_SCREAMING_SNAKE_CASE )
with patch("""transformers.onnx.features.is_tf_available""" , _SCREAMING_SNAKE_CASE ):
lowerCamelCase_ =FeaturesManager.determine_framework(self.test_model )
self.assertEqual(_SCREAMING_SNAKE_CASE , self.framework_pt )
# PyTorch not in environment -> use TensorFlow
lowerCamelCase_ =MagicMock(return_value=_SCREAMING_SNAKE_CASE )
with patch("""transformers.onnx.features.is_torch_available""" , _SCREAMING_SNAKE_CASE ):
lowerCamelCase_ =FeaturesManager.determine_framework(self.test_model )
self.assertEqual(_SCREAMING_SNAKE_CASE , self.framework_tf )
# Both in environment -> use PyTorch
lowerCamelCase_ =MagicMock(return_value=_SCREAMING_SNAKE_CASE )
lowerCamelCase_ =MagicMock(return_value=_SCREAMING_SNAKE_CASE )
with patch("""transformers.onnx.features.is_tf_available""" , _SCREAMING_SNAKE_CASE ), patch(
"""transformers.onnx.features.is_torch_available""" , _SCREAMING_SNAKE_CASE ):
lowerCamelCase_ =FeaturesManager.determine_framework(self.test_model )
self.assertEqual(_SCREAMING_SNAKE_CASE , self.framework_pt )
# Both not in environment -> raise error
lowerCamelCase_ =MagicMock(return_value=_SCREAMING_SNAKE_CASE )
lowerCamelCase_ =MagicMock(return_value=_SCREAMING_SNAKE_CASE )
with patch("""transformers.onnx.features.is_tf_available""" , _SCREAMING_SNAKE_CASE ), patch(
"""transformers.onnx.features.is_torch_available""" , _SCREAMING_SNAKE_CASE ):
with self.assertRaises(_SCREAMING_SNAKE_CASE ):
lowerCamelCase_ =FeaturesManager.determine_framework(self.test_model )
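# Usage sketch of the API under test (assumes a working torch install):
#   from transformers.onnx import FeaturesManager
#   FeaturesManager.determine_framework("hf-internal-testing/tiny-random-bert")  # -> "pt"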
from __future__ import annotations
from random import random
class lowercase :
def __init__( self ,A__ = None):
lowercase = value
lowercase = random()
lowercase = None
lowercase = None
def __repr__( self):
from pprint import pformat
if self.left is None and self.right is None:
return f'\'{self.value}: {self.prior:.5}\''
else:
return pformat(
{f'{self.value}: {self.prior:.5}': (self.left, self.right)} ,indent=1)
def __str__( self):
lowercase = str(self.value) + ''' '''
lowercase = str(self.left or '''''')
lowercase = str(self.right or '''''')
return value + left + right
def UpperCamelCase ( lowerCAmelCase__ , lowerCAmelCase__ ):
'''simple docstring'''
if root is None: # None tree is split into 2 Nones
return None, None
elif root.value is None:
return None, None
else:
if value < root.value:
lowercase , lowercase = split(root.left , lowerCAmelCase__ )
return left, root
else:
lowercase , lowercase = split(root.right , lowerCAmelCase__ )
return root, right
def UpperCamelCase ( lowerCAmelCase__ , lowerCAmelCase__ ):
    '''Merge two treaps; every value in ``left`` must be <= every value in ``right``.'''
if (not left) or (not right): # If one node is None, return the other
return left or right
elif left.prior < right.prior:
lowercase = merge(left.right , lowerCAmelCase__ )
return left
else:
lowercase = merge(lowerCAmelCase__ , right.left )
return right
def UpperCamelCase ( lowerCAmelCase__ , lowerCAmelCase__ ):
'''simple docstring'''
lowercase = Node(lowerCAmelCase__ )
lowercase , lowercase = split(lowerCAmelCase__ , lowerCAmelCase__ )
return merge(merge(lowerCAmelCase__ , lowerCAmelCase__ ) , lowerCAmelCase__ )
def UpperCamelCase ( lowerCAmelCase__ , lowerCAmelCase__ ):
'''simple docstring'''
lowercase , lowercase = split(lowerCAmelCase__ , value - 1 )
lowercase , lowercase = split(lowerCAmelCase__ , lowerCAmelCase__ )
return merge(lowerCAmelCase__ , lowerCAmelCase__ )
def UpperCamelCase ( lowerCAmelCase__ ):
'''simple docstring'''
if not root: # None
return
else:
inorder(root.left )
print(root.value , end=''',''' )
inorder(root.right )
def UpperCamelCase ( lowerCAmelCase__ , lowerCAmelCase__ ):
'''simple docstring'''
for arg in args.split():
if arg[0] == "+":
lowercase = insert(lowerCAmelCase__ , int(arg[1:] ) )
elif arg[0] == "-":
lowercase = erase(lowerCAmelCase__ , int(arg[1:] ) )
else:
print('''Unknown command''' )
return root
def UpperCamelCase ( ):
'''simple docstring'''
lowercase = None
    print(
        '''enter numbers to create a tree, "+ value" to insert value into the treap, '''
        '''"- value" to erase all nodes with that value. \'q\' to quit. ''' )
lowercase = input()
while args != "q":
lowercase = interact_treap(lowerCAmelCase__ , lowerCAmelCase__ )
print(lowerCAmelCase__ )
lowercase = input()
    print('''goodbye!''' )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
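# Programmatic usage sketch, assuming the successive definitions above keep their
# upstream names (insert / erase / inorder); in this mangled copy they all share
# one name, so treat the identifiers below as role labels:
#   root = None
#   for v in (5, 3, 8, 3):
#       root = insert(root, v)
#   inorder(root)   # prints: 3,3,5,8,
#   root = erase(root, 3)
#   inorder(root)   # prints: 5,8,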
import gc
import random
import unittest
import torch
from diffusers import (
IFImgaImgPipeline,
IFImgaImgSuperResolutionPipeline,
IFInpaintingPipeline,
IFInpaintingSuperResolutionPipeline,
IFPipeline,
IFSuperResolutionPipeline,
)
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import floats_tensor, load_numpy, require_torch_gpu, skip_mps, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
from . import IFPipelineTesterMixin
@skip_mps
class lowercase ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
lowercase_ : Optional[Any] =IFPipeline
lowercase_ : List[str] =TEXT_TO_IMAGE_PARAMS - {'''width''', '''height''', '''latents'''}
lowercase_ : List[str] =TEXT_TO_IMAGE_BATCH_PARAMS
lowercase_ : int =PipelineTesterMixin.required_optional_params - {'''latents'''}
def A__ ( self):
return self._get_dummy_components()
def A__ ( self ,A__ ,A__=0):
if str(A__).startswith('''mps'''):
lowercase = torch.manual_seed(A__)
else:
lowercase = torch.Generator(device=A__).manual_seed(A__)
lowercase = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''generator''': generator,
'''num_inference_steps''': 2,
'''output_type''': '''numpy''',
}
return inputs
def A__ ( self):
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != '''cuda''' ,reason='''float16 requires CUDA''')
def A__ ( self):
        # Due to non-determinism in the save/load of the hf-internal-testing/tiny-random-t5 text encoder
super().test_save_load_floataa(expected_max_diff=1E-1)
def A__ ( self):
self._test_attention_slicing_forward_pass(expected_max_diff=1E-2)
def A__ ( self):
self._test_save_load_local()
def A__ ( self):
self._test_inference_batch_single_identical(
expected_max_diff=1E-2 ,)
@unittest.skipIf(
torch_device != '''cuda''' or not is_xformers_available() ,reason='''XFormers attention is only available with CUDA and `xformers` installed''' ,)
def A__ ( self):
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3)
@slow
@require_torch_gpu
class lowercase ( unittest.TestCase ):
def A__ ( self):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def A__ ( self):
        # text-to-image (base "IF" stage)
lowercase = IFPipeline.from_pretrained('''DeepFloyd/IF-I-XL-v1.0''' ,variant='''fp16''' ,torch_dtype=torch.floataa)
lowercase = IFSuperResolutionPipeline.from_pretrained(
'''DeepFloyd/IF-II-L-v1.0''' ,variant='''fp16''' ,torch_dtype=torch.floataa ,text_encoder=A__ ,tokenizer=A__)
        # pre-compute text embeddings and remove T5 to save memory
pipe_a.text_encoder.to('''cuda''')
lowercase , lowercase = pipe_a.encode_prompt('''anime turtle''' ,device='''cuda''')
del pipe_a.tokenizer
del pipe_a.text_encoder
gc.collect()
lowercase = None
lowercase = None
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor())
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor())
self._test_if(A__ ,A__ ,A__ ,A__)
pipe_a.remove_all_hooks()
pipe_a.remove_all_hooks()
# img2img
lowercase = IFImgaImgPipeline(**pipe_a.components)
lowercase = IFImgaImgSuperResolutionPipeline(**pipe_a.components)
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor())
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor())
self._test_if_imgaimg(A__ ,A__ ,A__ ,A__)
pipe_a.remove_all_hooks()
pipe_a.remove_all_hooks()
# inpainting
lowercase = IFInpaintingPipeline(**pipe_a.components)
lowercase = IFInpaintingSuperResolutionPipeline(**pipe_a.components)
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor())
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor())
self._test_if_inpainting(A__ ,A__ ,A__ ,A__)
def A__ ( self ,A__ ,A__ ,A__ ,A__):
# pipeline 1
_start_torch_memory_measurement()
lowercase = torch.Generator(device='''cpu''').manual_seed(0)
lowercase = pipe_a(
prompt_embeds=A__ ,negative_prompt_embeds=A__ ,num_inference_steps=2 ,generator=A__ ,output_type='''np''' ,)
lowercase = output.images[0]
assert image.shape == (6_4, 6_4, 3)
lowercase = torch.cuda.max_memory_allocated()
assert mem_bytes < 1_3 * 1_0**9
lowercase = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if.npy''')
assert_mean_pixel_difference(A__ ,A__)
# pipeline 2
_start_torch_memory_measurement()
lowercase = torch.Generator(device='''cpu''').manual_seed(0)
lowercase = floats_tensor((1, 3, 6_4, 6_4) ,rng=random.Random(0)).to(A__)
lowercase = pipe_a(
prompt_embeds=A__ ,negative_prompt_embeds=A__ ,image=A__ ,generator=A__ ,num_inference_steps=2 ,output_type='''np''' ,)
lowercase = output.images[0]
assert image.shape == (2_5_6, 2_5_6, 3)
lowercase = torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 1_0**9
lowercase = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_superresolution_stage_II.npy''')
assert_mean_pixel_difference(A__ ,A__)
def A__ ( self ,A__ ,A__ ,A__ ,A__):
# pipeline 1
_start_torch_memory_measurement()
lowercase = floats_tensor((1, 3, 6_4, 6_4) ,rng=random.Random(0)).to(A__)
lowercase = torch.Generator(device='''cpu''').manual_seed(0)
lowercase = pipe_a(
prompt_embeds=A__ ,negative_prompt_embeds=A__ ,image=A__ ,num_inference_steps=2 ,generator=A__ ,output_type='''np''' ,)
lowercase = output.images[0]
assert image.shape == (6_4, 6_4, 3)
lowercase = torch.cuda.max_memory_allocated()
assert mem_bytes < 1_0 * 1_0**9
lowercase = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img.npy''')
assert_mean_pixel_difference(A__ ,A__)
# pipeline 2
_start_torch_memory_measurement()
lowercase = torch.Generator(device='''cpu''').manual_seed(0)
lowercase = floats_tensor((1, 3, 2_5_6, 2_5_6) ,rng=random.Random(0)).to(A__)
lowercase = floats_tensor((1, 3, 6_4, 6_4) ,rng=random.Random(0)).to(A__)
lowercase = pipe_a(
prompt_embeds=A__ ,negative_prompt_embeds=A__ ,image=A__ ,original_image=A__ ,generator=A__ ,num_inference_steps=2 ,output_type='''np''' ,)
lowercase = output.images[0]
assert image.shape == (2_5_6, 2_5_6, 3)
lowercase = torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 1_0**9
lowercase = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img_superresolution_stage_II.npy''')
assert_mean_pixel_difference(A__ ,A__)
def A__ ( self ,A__ ,A__ ,A__ ,A__):
# pipeline 1
_start_torch_memory_measurement()
lowercase = floats_tensor((1, 3, 6_4, 6_4) ,rng=random.Random(0)).to(A__)
lowercase = floats_tensor((1, 3, 6_4, 6_4) ,rng=random.Random(1)).to(A__)
lowercase = torch.Generator(device='''cpu''').manual_seed(0)
lowercase = pipe_a(
prompt_embeds=A__ ,negative_prompt_embeds=A__ ,image=A__ ,mask_image=A__ ,num_inference_steps=2 ,generator=A__ ,output_type='''np''' ,)
lowercase = output.images[0]
assert image.shape == (6_4, 6_4, 3)
lowercase = torch.cuda.max_memory_allocated()
assert mem_bytes < 1_0 * 1_0**9
lowercase = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting.npy''')
assert_mean_pixel_difference(A__ ,A__)
# pipeline 2
_start_torch_memory_measurement()
lowercase = torch.Generator(device='''cpu''').manual_seed(0)
lowercase = floats_tensor((1, 3, 6_4, 6_4) ,rng=random.Random(0)).to(A__)
lowercase = floats_tensor((1, 3, 2_5_6, 2_5_6) ,rng=random.Random(0)).to(A__)
lowercase = floats_tensor((1, 3, 2_5_6, 2_5_6) ,rng=random.Random(1)).to(A__)
lowercase = pipe_a(
prompt_embeds=A__ ,negative_prompt_embeds=A__ ,image=A__ ,mask_image=A__ ,original_image=A__ ,generator=A__ ,num_inference_steps=2 ,output_type='''np''' ,)
lowercase = output.images[0]
assert image.shape == (2_5_6, 2_5_6, 3)
lowercase = torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 1_0**9
lowercase = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting_superresolution_stage_II.npy''')
assert_mean_pixel_difference(A__ ,A__)
def UpperCamelCase ( ):
'''simple docstring'''
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
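# The helper above resets CUDA memory counters so each stage's peak-memory
# assertion (`torch.cuda.max_memory_allocated()`) measures only that stage.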
import unittest
from parameterized import parameterized
from transformers import LlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaTokenizer
class a :
def __init__( self :Optional[Any] ,__lowercase :Optional[Any] ,__lowercase :Optional[int]=1_3 ,__lowercase :Union[str, Any]=7 ,__lowercase :int=True ,__lowercase :Optional[int]=True ,__lowercase :Tuple=False ,__lowercase :List[str]=True ,__lowercase :Optional[Any]=9_9 ,__lowercase :str=3_2 ,__lowercase :Any=5 ,__lowercase :Optional[int]=4 ,__lowercase :Optional[int]=3_7 ,__lowercase :Optional[Any]="gelu" ,__lowercase :int=0.1 ,__lowercase :str=0.1 ,__lowercase :List[str]=5_1_2 ,__lowercase :int=1_6 ,__lowercase :Optional[int]=2 ,__lowercase :int=0.02 ,__lowercase :Any=3 ,__lowercase :List[str]=4 ,__lowercase :int=None ,):
snake_case__ : Dict = parent
snake_case__ : int = batch_size
snake_case__ : Union[str, Any] = seq_length
snake_case__ : Any = is_training
snake_case__ : Optional[Any] = use_input_mask
snake_case__ : Tuple = use_token_type_ids
snake_case__ : Union[str, Any] = use_labels
snake_case__ : Optional[Any] = vocab_size
snake_case__ : str = hidden_size
snake_case__ : Optional[Any] = num_hidden_layers
snake_case__ : List[Any] = num_attention_heads
snake_case__ : Union[str, Any] = intermediate_size
snake_case__ : Any = hidden_act
snake_case__ : List[Any] = hidden_dropout_prob
snake_case__ : Tuple = attention_probs_dropout_prob
snake_case__ : Union[str, Any] = max_position_embeddings
snake_case__ : Dict = type_vocab_size
snake_case__ : Any = type_sequence_label_size
snake_case__ : Any = initializer_range
snake_case__ : int = num_labels
snake_case__ : Optional[int] = num_choices
snake_case__ : Any = scope
def __lowerCamelCase ( self :int ):
snake_case__ : List[Any] = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
snake_case__ : Optional[int] = None
if self.use_input_mask:
snake_case__ : Dict = random_attention_mask([self.batch_size, self.seq_length] )
snake_case__ : List[Any] = None
if self.use_token_type_ids:
snake_case__ : List[str] = ids_tensor([self.batch_size, self.seq_length] ,self.type_vocab_size )
snake_case__ : str = None
snake_case__ : Tuple = None
snake_case__ : Optional[int] = None
if self.use_labels:
snake_case__ : int = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
snake_case__ : Tuple = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels )
snake_case__ : Union[str, Any] = ids_tensor([self.batch_size] ,self.num_choices )
snake_case__ : Optional[Any] = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def __lowerCamelCase ( self :Tuple ):
return LlamaConfig(
vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,type_vocab_size=self.type_vocab_size ,is_decoder=__lowercase ,initializer_range=self.initializer_range ,)
def __lowerCamelCase ( self :Union[str, Any] ,__lowercase :Tuple ,__lowercase :Tuple ,__lowercase :Union[str, Any] ,__lowercase :Union[str, Any] ,__lowercase :Any ,__lowercase :Optional[Any] ,__lowercase :Tuple ):
snake_case__ : Union[str, Any] = LlamaModel(config=__lowercase )
model.to(__lowercase )
model.eval()
snake_case__ : Optional[int] = model(__lowercase ,attention_mask=__lowercase )
snake_case__ : List[str] = model(__lowercase )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
def __lowerCamelCase ( self :Union[str, Any] ,__lowercase :List[str] ,__lowercase :Union[str, Any] ,__lowercase :str ,__lowercase :Any ,__lowercase :Tuple ,__lowercase :List[Any] ,__lowercase :str ,__lowercase :Tuple ,__lowercase :Tuple ,):
snake_case__ : Any = True
snake_case__ : Any = LlamaModel(__lowercase )
model.to(__lowercase )
model.eval()
snake_case__ : Dict = model(
__lowercase ,attention_mask=__lowercase ,encoder_hidden_states=__lowercase ,encoder_attention_mask=__lowercase ,)
snake_case__ : Optional[Any] = model(
__lowercase ,attention_mask=__lowercase ,encoder_hidden_states=__lowercase ,)
snake_case__ : str = model(__lowercase ,attention_mask=__lowercase )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
def __lowerCamelCase ( self :Union[str, Any] ,__lowercase :Union[str, Any] ,__lowercase :List[Any] ,__lowercase :Any ,__lowercase :Dict ,__lowercase :Union[str, Any] ,__lowercase :Union[str, Any] ,__lowercase :List[str] ,__lowercase :int ,__lowercase :str ,):
snake_case__ : int = LlamaForCausalLM(config=__lowercase )
model.to(__lowercase )
model.eval()
snake_case__ : Optional[Any] = model(__lowercase ,attention_mask=__lowercase ,labels=__lowercase )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
def __lowerCamelCase ( self :Optional[Any] ,__lowercase :Optional[Any] ,__lowercase :Union[str, Any] ,__lowercase :Optional[int] ,__lowercase :List[str] ,__lowercase :List[str] ,__lowercase :List[Any] ,__lowercase :List[Any] ,__lowercase :Union[str, Any] ,__lowercase :Optional[int] ,):
snake_case__ : Tuple = True
snake_case__ : Optional[int] = True
snake_case__ : List[str] = LlamaForCausalLM(config=__lowercase )
model.to(__lowercase )
model.eval()
# first forward pass
snake_case__ : List[str] = model(
__lowercase ,attention_mask=__lowercase ,encoder_hidden_states=__lowercase ,encoder_attention_mask=__lowercase ,use_cache=__lowercase ,)
snake_case__ : Optional[Any] = outputs.past_key_values
        # create hypothetical next tokens and extend next_input_ids with them
snake_case__ : List[Any] = ids_tensor((self.batch_size, 3) ,config.vocab_size )
snake_case__ : int = ids_tensor((self.batch_size, 3) ,vocab_size=2 )
        # append the new tokens to input_ids and the attention mask
snake_case__ : Any = torch.cat([input_ids, next_tokens] ,dim=-1 )
snake_case__ : str = torch.cat([input_mask, next_mask] ,dim=-1 )
snake_case__ : Any = model(
__lowercase ,attention_mask=__lowercase ,encoder_hidden_states=__lowercase ,encoder_attention_mask=__lowercase ,output_hidden_states=__lowercase ,)['''hidden_states'''][0]
snake_case__ : Dict = model(
__lowercase ,attention_mask=__lowercase ,encoder_hidden_states=__lowercase ,encoder_attention_mask=__lowercase ,past_key_values=__lowercase ,output_hidden_states=__lowercase ,)['''hidden_states'''][0]
# select random slice
snake_case__ : List[str] = ids_tensor((1,) ,output_from_past.shape[-1] ).item()
snake_case__ : str = output_from_no_past[:, -3:, random_slice_idx].detach()
snake_case__ : Tuple = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(__lowercase ,__lowercase ,atol=1e-3 ) )
def __lowerCamelCase ( self :Dict ):
snake_case__ : List[str] = self.prepare_config_and_inputs()
        snake_case__ : int = config_and_inputs
snake_case__ : List[Any] = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_torch
class a ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , unittest.TestCase ):
__lowerCAmelCase : Optional[int] = (LlamaModel, LlamaForCausalLM, LlamaForSequenceClassification) if is_torch_available() else ()
__lowerCAmelCase : Dict = (LlamaForCausalLM,) if is_torch_available() else ()
__lowerCAmelCase : Any = (
{
"""feature-extraction""": LlamaModel,
"""text-classification""": LlamaForSequenceClassification,
"""text-generation""": LlamaForCausalLM,
"""zero-shot""": LlamaForSequenceClassification,
}
if is_torch_available()
else {}
)
__lowerCAmelCase : Union[str, Any] = False
__lowerCAmelCase : Optional[Any] = False
def __lowerCamelCase ( self :Dict ):
snake_case__ : Dict = LlamaModelTester(self )
snake_case__ : List[str] = ConfigTester(self ,config_class=__lowercase ,hidden_size=3_7 )
def __lowerCamelCase ( self :str ):
self.config_tester.run_common_tests()
def __lowerCamelCase ( self :int ):
snake_case__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__lowercase )
def __lowerCamelCase ( self :str ):
snake_case__ : Dict = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
snake_case__ : Union[str, Any] = type
self.model_tester.create_and_check_model(*__lowercase )
def __lowerCamelCase ( self :Optional[Any] ):
snake_case__ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
snake_case__ : Tuple = 3
snake_case__ : List[str] = input_dict['''input_ids''']
snake_case__ : str = input_ids.ne(1 ).to(__lowercase )
snake_case__ : Any = ids_tensor([self.model_tester.batch_size] ,self.model_tester.type_sequence_label_size )
snake_case__ : Union[str, Any] = LlamaForSequenceClassification(__lowercase )
model.to(__lowercase )
model.eval()
snake_case__ : Optional[Any] = model(__lowercase ,attention_mask=__lowercase ,labels=__lowercase )
self.assertEqual(result.logits.shape ,(self.model_tester.batch_size, self.model_tester.num_labels) )
def __lowerCamelCase ( self :int ):
snake_case__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
snake_case__ : str = 3
snake_case__ : List[Any] = '''single_label_classification'''
snake_case__ : int = input_dict['''input_ids''']
snake_case__ : List[str] = input_ids.ne(1 ).to(__lowercase )
snake_case__ : Tuple = ids_tensor([self.model_tester.batch_size] ,self.model_tester.type_sequence_label_size )
snake_case__ : Dict = LlamaForSequenceClassification(__lowercase )
model.to(__lowercase )
model.eval()
snake_case__ : Optional[Any] = model(__lowercase ,attention_mask=__lowercase ,labels=__lowercase )
self.assertEqual(result.logits.shape ,(self.model_tester.batch_size, self.model_tester.num_labels) )
def __lowerCamelCase ( self :str ):
snake_case__ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
snake_case__ : str = 3
snake_case__ : List[Any] = '''multi_label_classification'''
snake_case__ : Dict = input_dict['''input_ids''']
snake_case__ : str = input_ids.ne(1 ).to(__lowercase )
snake_case__ : Optional[Any] = ids_tensor(
[self.model_tester.batch_size, config.num_labels] ,self.model_tester.type_sequence_label_size ).to(torch.float )
snake_case__ : Union[str, Any] = LlamaForSequenceClassification(__lowercase )
model.to(__lowercase )
model.eval()
snake_case__ : Any = model(__lowercase ,attention_mask=__lowercase ,labels=__lowercase )
self.assertEqual(result.logits.shape ,(self.model_tester.batch_size, self.model_tester.num_labels) )
@unittest.skip('''LLaMA buffers include complex numbers, which breaks this test''' )
def __lowerCamelCase ( self :List[str] ):
pass
@parameterized.expand([('''linear''',), ('''dynamic''',)] )
def __lowerCamelCase ( self :Dict ,__lowercase :List[str] ):
snake_case__ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
snake_case__ : Dict = ids_tensor([1, 1_0] ,config.vocab_size )
snake_case__ : str = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] ,config.vocab_size )
set_seed(4_2 ) # Fixed seed at init time so the two models get the same random weights
snake_case__ : Union[str, Any] = LlamaModel(__lowercase )
original_model.to(__lowercase )
original_model.eval()
snake_case__ : Union[str, Any] = original_model(__lowercase ).last_hidden_state
snake_case__ : List[Any] = original_model(__lowercase ).last_hidden_state
set_seed(4_2 ) # Fixed seed at init time so the two models get the same random weights
snake_case__ : Tuple = {'''type''': scaling_type, '''factor''': 10.0}
snake_case__ : List[Any] = LlamaModel(__lowercase )
scaled_model.to(__lowercase )
scaled_model.eval()
snake_case__ : List[str] = scaled_model(__lowercase ).last_hidden_state
snake_case__ : int = scaled_model(__lowercase ).last_hidden_state
# Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
# maximum sequence length, so the outputs for the short input should match.
if scaling_type == "dynamic":
self.assertTrue(torch.allclose(__lowercase ,__lowercase ,atol=1e-5 ) )
else:
self.assertFalse(torch.allclose(__lowercase ,__lowercase ,atol=1e-5 ) )
# The output should be different for long inputs
self.assertFalse(torch.allclose(__lowercase ,__lowercase ,atol=1e-5 ) )
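# RoPE scaling on the upstream LlamaConfig is configured with a dict, e.g.
# (illustrative values): config.rope_scaling = {"type": "linear", "factor": 2.0}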
@require_torch
class a ( unittest.TestCase ):
    @unittest.skip('''Logits are not exactly the same, once we fix the instabilities somehow, will update!''' )
@slow
def __lowerCamelCase ( self :Tuple ):
snake_case__ : int = [1, 3_0_6, 4_6_5_8, 2_7_8, 6_5_9_3, 3_1_0, 2_8_3_4, 3_3_8]
snake_case__ : List[str] = LlamaForCausalLM.from_pretrained('''meta-llama/Llama-2-7b-hf''' ,device_map='''auto''' )
snake_case__ : List[Any] = model(torch.tensor([input_ids] ) )
# Expected mean on dim = -1
snake_case__ : str = torch.tensor([[-6.6550, -4.1227, -4.9859, -3.2406, 0.8262, -3.0033, 1.2964, -3.3699]] )
torch.testing.assert_close(out.mean(-1 ) ,__lowercase ,atol=1e-2 ,rtol=1e-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
snake_case__ : Optional[int] = torch.tensor([-12.8281, -7.4453, -0.4639, -8.0625, -7.2500, -8.0000, -6.4883, -7.7695, -7.8438, -7.0312, -6.2188, -7.1328, -1.8496, 1.9961, -8.6250, -6.7227, -12.8281, -6.9492, -7.0742, -7.7852, -7.5820, -7.9062, -6.9375, -7.9805, -8.3438, -8.1562, -8.0469, -7.6250, -7.7422, -7.3398,] )
# fmt: on
torch.testing.assert_close(out[0, 0, :3_0] ,__lowercase ,atol=1e-5 ,rtol=1e-5 )
    @unittest.skip('''Logits are not exactly the same, once we fix the instabilities somehow, will update!''' )
@slow
def __lowerCamelCase ( self :str ):
snake_case__ : Optional[Any] = [1, 3_0_6, 4_6_5_8, 2_7_8, 6_5_9_3, 3_1_0, 2_8_3_4, 3_3_8]
snake_case__ : Optional[Any] = LlamaForCausalLM.from_pretrained('''meta-llama/Llama-2-13b-hf''' ,device_map='''auto''' )
snake_case__ : Any = model(torch.tensor(__lowercase ) )
# Expected mean on dim = -1
snake_case__ : Optional[int] = torch.tensor([[-2.0622, -1.2794, -1.1638, -0.9788, -1.4603, -1.0238, -1.7893, -1.4411]] )
torch.testing.assert_close(out.mean(-1 ) ,__lowercase ,atol=1e-2 ,rtol=1e-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
snake_case__ : Dict = torch.tensor([-8.1406, -8.0547, 2.7461, -1.2344, -0.1448, -1.8262, -1.0020, -1.8154, -1.6895, -1.8516, -2.3574, -0.9277, 3.7598, 6.5742, -1.2998, -0.1177, -8.1406, -2.9688, -2.9199, -3.1699, -3.5254, -2.3555, -2.7988, -3.4141, -2.8262, -4.5195, -3.3379, -3.3164, -2.7832, -3.0273] )
# fmt: on
torch.testing.assert_close(out[0, 0, :3_0] ,__lowercase ,atol=1e-5 ,rtol=1e-5 )
    @unittest.skip('''Logits are not exactly the same, once we fix the instabilities somehow, will update!''' )
@slow
def __lowerCamelCase ( self :Optional[Any] ):
snake_case__ : Optional[Any] = [1, 3_0_6, 4_6_5_8, 2_7_8, 6_5_9_3, 3_1_0, 2_8_3_4, 3_3_8]
snake_case__ : Any = LlamaForCausalLM.from_pretrained('''meta-llama/Llama-2-13b-chat-hf''' ,device_map='''auto''' )
snake_case__ : Union[str, Any] = model(torch.tensor(__lowercase ) )
# Expected mean on dim = -1
snake_case__ : str = torch.tensor([[-0.8562, -1.8520, -0.7551, -0.4162, -1.5161, -1.2038, -2.4823, -2.3254]] )
torch.testing.assert_close(out.mean(-1 ) ,__lowercase ,atol=1e-2 ,rtol=1e-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
snake_case__ : Optional[int] = torch.tensor([-2.2227, 4.8828, 0.9023, -0.4578, -0.7871, -0.1033, -0.6221, -0.5786, -0.7803, -1.0674, -1.2920, -0.1570, 0.8008, 2.0723, -0.9497, 0.2771, -2.2227, -0.7612, -1.4346, -1.2061, -1.6426, -0.3000, -0.7139, -1.1934, -1.8691, -1.6973, -1.5947, -1.2705, -0.3523, -0.5513] )
# fmt: on
torch.testing.assert_close(out.mean(-1 ) ,__lowercase ,atol=1e-2 ,rtol=1e-2 )
@unittest.skip(
        '''Logits are not exactly the same, once we fix the instabilities somehow, will update! Also it is gonna be a `too_slow` test''' )
@slow
def __lowerCamelCase ( self :int ):
snake_case__ : Optional[Any] = [1, 3_0_6, 4_6_5_8, 2_7_8, 6_5_9_3, 3_1_0, 2_8_3_4, 3_3_8]
snake_case__ : Optional[int] = LlamaForCausalLM.from_pretrained('''meta-llama/Llama-2-70b-hf''' ,device_map='''auto''' )
snake_case__ : Optional[Any] = model(torch.tensor(__lowercase ) )
snake_case__ : str = torch.tensor(
[[-4.2327, -3.3360, -4.6665, -4.7631, -1.8180, -3.4170, -1.4211, -3.1810]] ,dtype=torch.floataa )
torch.testing.assert_close(out.mean(-1 ) ,__lowercase ,atol=1e-2 ,rtol=1e-2 )
# fmt: off
snake_case__ : str = torch.tensor([-9.4922, -3.9551, 1.7998, -5.6758, -5.1055, -5.8984, -4.8320, -6.8086, -6.5391, -5.6172, -5.5820, -5.5352, 1.7881, 3.6289, -6.5117, -3.4785, -9.5000, -6.0352, -6.8125, -6.0195, -6.6836, -5.4727, -6.2812, -6.0391, -7.3398, -7.4297, -7.4844, -6.5820, -5.8789, -5.5312] )
# fmt: on
torch.testing.assert_close(out[0, 0, :3_0] ,__lowercase ,atol=1e-5 ,rtol=1e-5 )
    @unittest.skip('''Model is currently gated''' )
@slow
def __lowerCamelCase ( self :Union[str, Any] ):
snake_case__ : Optional[Any] = '''Simply put, the theory of relativity states that 1) the laws of physics are the same everywhere in the universe and 2) the passage of time and the length of objects can vary depending on the observer\'s frame of reference.\n\nThe first part of the theory, that the laws of physics are the same everywhere, is known as the "princi'''
snake_case__ : Optional[Any] = '''Simply put, the theory of relativity states that '''
snake_case__ : Tuple = LlamaTokenizer.from_pretrained('''meta-llama/Llama-2-13b-chat-hf''' )
snake_case__ : Dict = tokenizer.encode(__lowercase ,return_tensors='''pt''' )
snake_case__ : str = LlamaForCausalLM.from_pretrained(
'''meta-llama/Llama-2-13b-chat-hf''' ,device_map='''sequential''' ,use_safetensors=__lowercase )
# greedy generation outputs
snake_case__ : List[str] = model.generate(__lowercase ,max_new_tokens=6_4 ,top_p=__lowercase ,temperature=1 ,do_sample=__lowercase )
snake_case__ : List[str] = tokenizer.decode(generated_ids[0] ,skip_special_tokens=__lowercase )
self.assertEqual(__lowercase ,__lowercase )
| 364
|
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import PoolFormerImageProcessor
class PoolFormerImageProcessingTester ( unittest.TestCase ):
    def __init__( self ,parent ,batch_size=7 ,num_channels=3 ,min_resolution=3_0 ,max_resolution=4_0_0 ,do_resize_and_center_crop=True ,size=None ,crop_pct=0.9 ,crop_size=None ,do_normalize=True ,image_mean=[0.5, 0.5, 0.5] ,image_std=[0.5, 0.5, 0.5] ,):
        size = size if size is not None else {'''shortest_edge''': 3_0}
        crop_size = crop_size if crop_size is not None else {'''height''': 3_0, '''width''': 3_0}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize_and_center_crop = do_resize_and_center_crop
        self.size = size
        self.crop_pct = crop_pct
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
    def prepare_image_processor_dict ( self ):
return {
"size": self.size,
"do_resize_and_center_crop": self.do_resize_and_center_crop,
"crop_pct": self.crop_pct,
"crop_size": self.crop_size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
}
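# Note: the dict returned above mirrors the constructor arguments one-to-one,
# so `image_processing_class(**prepare_image_processor_dict())` in the tests
# below reconstructs an equivalent processor from this tester's configuration.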
@require_torch
@require_vision
class PoolFormerImageProcessingTest ( ImageProcessingSavingTestMixin , unittest.TestCase ):
    image_processing_class = PoolFormerImageProcessor if is_vision_available() else None
    def setUp ( self ):
        self.image_processor_tester = PoolFormerImageProcessingTester(self )
@property
    def image_processor_dict ( self ):
return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties ( self ):
        image_processing = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(image_processing ,'''do_resize_and_center_crop''' ) )
        self.assertTrue(hasattr(image_processing ,'''size''' ) )
        self.assertTrue(hasattr(image_processing ,'''crop_pct''' ) )
        self.assertTrue(hasattr(image_processing ,'''do_normalize''' ) )
        self.assertTrue(hasattr(image_processing ,'''image_mean''' ) )
        self.assertTrue(hasattr(image_processing ,'''image_std''' ) )
    def test_image_processor_from_dict_with_kwargs ( self ):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict )
        self.assertEqual(image_processor.size ,{'''shortest_edge''': 3_0} )
        self.assertEqual(image_processor.crop_size ,{'''height''': 3_0, '''width''': 3_0} )
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict ,size=4_2 ,crop_size=8_4 )
        self.assertEqual(image_processor.size ,{'''shortest_edge''': 4_2} )
        self.assertEqual(image_processor.crop_size ,{'''height''': 8_4, '''width''': 8_4} )
    def test_batch_feature ( self ):
pass
    def test_call_pil ( self ):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester ,equal_resolution=False )
        for image in image_inputs:
            self.assertIsInstance(image ,Image.Image )
        # Test not batched input
        encoded_images = image_processing(image_inputs[0] ,return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_images.shape ,(
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['''height'''],
                self.image_processor_tester.crop_size['''width'''],
            ) ,)
        # Test batched
        encoded_images = image_processing(image_inputs ,return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_images.shape ,(
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['''height'''],
                self.image_processor_tester.crop_size['''width'''],
            ) ,)
    def test_call_numpy ( self ):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester ,equal_resolution=False ,numpify=True )
        for image in image_inputs:
            self.assertIsInstance(image ,np.ndarray )
        # Test not batched input
        encoded_images = image_processing(image_inputs[0] ,return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_images.shape ,(
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['''height'''],
                self.image_processor_tester.crop_size['''width'''],
            ) ,)
        # Test batched
        encoded_images = image_processing(image_inputs ,return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_images.shape ,(
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['''height'''],
                self.image_processor_tester.crop_size['''width'''],
            ) ,)
    def test_call_pytorch ( self ):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester ,equal_resolution=False ,torchify=True )
        for image in image_inputs:
            self.assertIsInstance(image ,torch.Tensor )
        # Test not batched input
        encoded_images = image_processing(image_inputs[0] ,return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_images.shape ,(
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['''height'''],
                self.image_processor_tester.crop_size['''width'''],
            ) ,)
        # Test batched
        encoded_images = image_processing(image_inputs ,return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_images.shape ,(
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['''height'''],
                self.image_processor_tester.crop_size['''width'''],
            ) ,)
| 44
| 0
|
"""simple docstring"""
def _A ( number ):
    """simple docstring"""
    if not isinstance(number , int ) or number < 0:
        raise ValueError('''Input must be a non-negative integer''' )
    count = 0
while number:
# This way we arrive at next set bit (next 1) instead of looping
# through each bit and checking for 1s hence the
# loop won't run 32 times it will only run the number of `1` times
number &= number - 1
count += 1
return count
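# Worked example for the loop above: 13 is 0b1101, and `number &= number - 1`
# clears the lowest set bit on each pass, so _A(13) finishes after three
# iterations and returns 3 (Brian Kernighan's popcount trick).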
if __name__ == "__main__":
import doctest
doctest.testmod()
| 81
|
"""simple docstring"""
def UpperCamelCase ( sentence ) ->str:
    """simple docstring"""
    return " ".join(
        "".join(word[::-1] ) if len(word ) > 4 else word for word in sentence.split() )
if __name__ == "__main__":
import doctest
doctest.testmod()
    print(UpperCamelCase('Hey wollef sroirraw'))
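    # Words longer than four characters are reversed in place, so the call
    # above prints 'Hey fellow warriors'.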
| 243
| 0
|
import warnings
from ...utils import logging
from .image_processing_clip import CLIPImageProcessor
UpperCAmelCase_ = logging.get_logger(__name__)
class lowerCamelCase__( __lowerCamelCase):
def __init__( self: Optional[Any] , *UpperCamelCase_: Optional[int] , **UpperCamelCase_: int ):
warnings.warn(
"""The class CLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"""
""" use CLIPImageProcessor instead.""" , UpperCamelCase_ , )
super().__init__(*UpperCamelCase_ , **UpperCamelCase_ )
| 29
|
def lowerCamelCase__ ( A__ : int ):
'''simple docstring'''
    memo = [[0 for _ in range(A__ )] for _ in range(A__ + 1 )]
    for i in range(A__ + 1 ):
        memo[i][0] = 1
    for n in range(A__ + 1 ):
        for k in range(1 , A__ ):
            memo[n][k] += memo[n][k - 1]
            if n - k > 0:
                memo[n][k] += memo[n - k - 1][k]
    return memo[A__][A__ - 1]
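# The nested loops above fill `memo` bottom-up: each memo[n][k] is the sum of
# memo[n][k - 1] and, when n - k > 0, memo[n - k - 1][k], so every entry only
# depends on cells that were already computed; memo[m][m - 1] holds the answer.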
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
try:
UpperCAmelCase_ = int(input('Enter a number: ').strip())
            print(lowerCamelCase__(UpperCAmelCase_))
except ValueError:
print('Please enter a number.')
else:
try:
UpperCAmelCase_ = int(sys.argv[1])
            print(lowerCamelCase__(UpperCAmelCase_))
except ValueError:
print('Please pass a number.')
| 29
| 1
|
"""simple docstring"""
print((lambda quine: quine % quine)('''print((lambda quine: quine %% quine)(%r))'''))
| 105
|
'''simple docstring'''
from __future__ import absolute_import, division, print_function, unicode_literals
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers import RobertaConfig
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.roberta.modeling_roberta import (
ROBERTA_INPUTS_DOCSTRING,
ROBERTA_START_DOCSTRING,
RobertaEmbeddings,
)
from .modeling_highway_bert import BertPreTrainedModel, DeeBertModel, HighwayException, entropy
@add_start_docstrings(
"""The RoBERTa Model transformer with early exiting (DeeRoBERTa). """ , lowerCamelCase , )
class __SCREAMING_SNAKE_CASE ( lowerCamelCase ):
    config_class = RobertaConfig
    base_model_prefix = """roberta"""
    def __init__( self , config ):
        super().__init__(config )
        self.embeddings = RobertaEmbeddings(config )
        self.init_weights()
@add_start_docstrings(
"""RoBERTa Model (with early exiting - DeeRoBERTa) with a classifier on top,
    also takes care of multi-layer training. """ , ROBERTA_START_DOCSTRING , )
class DeeRobertaForSequenceClassification ( BertPreTrainedModel ):
    config_class = RobertaConfig
    base_model_prefix = """roberta"""
    def __init__( self , config ):
        super().__init__(config )
        self.num_labels = config.num_labels
        self.num_layers = config.num_hidden_layers
        self.roberta = DeeRobertaModel(config )
        self.dropout = nn.Dropout(config.hidden_dropout_prob )
        self.classifier = nn.Linear(config.hidden_size , self.config.num_labels )
    @add_start_docstrings_to_model_forward(ROBERTA_INPUTS_DOCSTRING )
    def forward ( self , input_ids=None , attention_mask=None , token_type_ids=None , position_ids=None , head_mask=None , inputs_embeds=None , labels=None , output_layer=-1 , train_highway=False , ):
        exit_layer = self.num_layers
        try:
            outputs = self.roberta(
                input_ids , attention_mask=attention_mask , token_type_ids=token_type_ids , position_ids=position_ids , head_mask=head_mask , inputs_embeds=inputs_embeds , )
            pooled_output = outputs[1]
            pooled_output = self.dropout(pooled_output )
            logits = self.classifier(pooled_output )
            outputs = (logits,) + outputs[2:] # add hidden states and attention if they are here
        except HighwayException as e:
            outputs = e.message
            exit_layer = e.exit_layer
            logits = outputs[0]
        if not self.training:
            original_entropy = entropy(logits )
            highway_entropy = []
            highway_logits_all = []
        if labels is not None:
            if self.num_labels == 1:
                # We are doing regression
                loss_fct = MSELoss()
                loss = loss_fct(logits.view(-1 ) , labels.view(-1 ) )
            else:
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
            # work with highway exits
            highway_losses = []
            for highway_exit in outputs[-1]:
                highway_logits = highway_exit[0]
                if not self.training:
                    highway_logits_all.append(highway_logits )
                    highway_entropy.append(highway_exit[2] )
                if self.num_labels == 1:
                    # We are doing regression
                    loss_fct = MSELoss()
                    highway_loss = loss_fct(highway_logits.view(-1 ) , labels.view(-1 ) )
                else:
                    loss_fct = CrossEntropyLoss()
                    highway_loss = loss_fct(highway_logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
                highway_losses.append(highway_loss )
            if train_highway:
                outputs = (sum(highway_losses[:-1] ),) + outputs
                # exclude the final highway, of course
            else:
                outputs = (loss,) + outputs
        if not self.training:
            outputs = outputs + ((original_entropy, highway_entropy), exit_layer)
            if output_layer >= 0:
                outputs = (
                    (outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:]
                ) # use the highway of the last layer
        return outputs # (loss), logits, (hidden_states), (attentions), entropy
| 152
| 0
|
from typing import Any, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from torch.utils.data import DistributedSampler, RandomSampler
from transformers import PreTrainedModel, Trainer, logging
from transformers.integrations import is_fairscale_available
from transformers.models.fsmt.configuration_fsmt import FSMTConfig
from transformers.optimization import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
from transformers.trainer_pt_utils import get_tpu_sampler
from transformers.training_args import ParallelMode
from transformers.utils import is_torch_tpu_available
if is_fairscale_available():
from fairscale.optim import OSS
logger = logging.get_logger(__name__)
arg_to_scheduler = {
"linear": get_linear_schedule_with_warmup,
"cosine": get_cosine_schedule_with_warmup,
"cosine_w_restarts": get_cosine_with_hard_restarts_schedule_with_warmup,
"polynomial": get_polynomial_decay_schedule_with_warmup,
"constant": get_constant_schedule,
"constant_w_warmup": get_constant_schedule_with_warmup,
}
class Seq2SeqTrainer ( Trainer ):
    def __init__(self , config=None , data_args=None , *args , **kwargs ):
        super().__init__(*args , **kwargs )
        if config is None:
            assert isinstance(self.model , PreTrainedModel ), (
                "If no `config` is passed the model to be trained has to be of type `PreTrainedModel`, but is"
                f" {self.model.__class__}"
            )
            self.config = self.model.config
        else:
            self.config = config
        self.data_args = data_args
        self.vocab_size = self.config.tgt_vocab_size if isinstance(self.config , FSMTConfig ) else self.config.vocab_size
        if self.args.label_smoothing != 0 or (self.data_args is not None and self.data_args.ignore_pad_token_for_loss):
            assert self.config.pad_token_id is not None, (
                "Make sure that `config.pad_token_id` is correctly defined when ignoring `pad_token` for loss"
                " calculation or doing label smoothing."
            )
        if self.config.pad_token_id is None and self.config.eos_token_id is not None:
            logger.warning(
                f"The `config.pad_token_id` is `None`. Using `config.eos_token_id` = {self.config.eos_token_id} for"
                ''' padding..''' )
        if self.args.label_smoothing == 0:
            self.loss_fn = torch.nn.CrossEntropyLoss(ignore_index=self.config.pad_token_id )
        else:
            # dynamically import label_smoothed_nll_loss
            from utils import label_smoothed_nll_loss
            self.loss_fn = label_smoothed_nll_loss
    def create_optimizer_and_scheduler (self , num_training_steps: int ):
        if self.optimizer is None:
            no_decay = ['''bias''', '''LayerNorm.weight''']
            optimizer_grouped_parameters = [
                {
                    '''params''': [p for n, p in self.model.named_parameters() if not any(nd in n for nd in no_decay )],
                    '''weight_decay''': self.args.weight_decay,
                },
                {
                    '''params''': [p for n, p in self.model.named_parameters() if any(nd in n for nd in no_decay )],
                    '''weight_decay''': 0.0,
                },
            ]
            optimizer_cls = Adafactor if self.args.adafactor else AdamW
            if self.args.adafactor:
                optimizer_cls = Adafactor
                optimizer_kwargs = {'''scale_parameter''': False, '''relative_step''': False}
            else:
                optimizer_cls = AdamW
                optimizer_kwargs = {
                    '''betas''': (self.args.adam_beta1, self.args.adam_beta2),
                    '''eps''': self.args.adam_epsilon,
                }
            optimizer_kwargs['''lr'''] = self.args.learning_rate
            if self.sharded_ddp:
                self.optimizer = OSS(
                    params=optimizer_grouped_parameters , optim=optimizer_cls , **optimizer_kwargs , )
            else:
                self.optimizer = optimizer_cls(optimizer_grouped_parameters , **optimizer_kwargs )
        if self.lr_scheduler is None:
            self.lr_scheduler = self._get_lr_scheduler(num_training_steps )
        else:  # ignoring --lr_scheduler
            logger.warning('''scheduler is passed to `Seq2SeqTrainer`, `--lr_scheduler` arg is ignored.''' )
    def _get_lr_scheduler (self , num_training_steps ):
        schedule_func = arg_to_scheduler[self.args.lr_scheduler]
        if self.args.lr_scheduler == "constant":
            scheduler = schedule_func(self.optimizer )
        elif self.args.lr_scheduler == "constant_w_warmup":
            scheduler = schedule_func(self.optimizer , num_warmup_steps=self.args.warmup_steps )
        else:
            scheduler = schedule_func(
                self.optimizer , num_warmup_steps=self.args.warmup_steps , num_training_steps=num_training_steps )
return scheduler
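    # Sketch of the dispatch above: "constant" takes no extra arguments,
    # "constant_w_warmup" only receives num_warmup_steps, and every other entry
    # of arg_to_scheduler additionally receives num_training_steps.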
    def _get_train_sampler (self ):
if isinstance(self.train_dataset , torch.utils.data.IterableDataset ):
return None
elif is_torch_tpu_available():
return get_tpu_sampler(self.train_dataset )
else:
if self.args.sortish_sampler:
self.train_dataset.make_sortish_sampler(
self.args.per_device_train_batch_size , distributed=(self.args.parallel_mode == ParallelMode.DISTRIBUTED) , )
return (
RandomSampler(self.train_dataset )
if self.args.local_rank == -1
else DistributedSampler(self.train_dataset )
)
    def _compute_loss (self , model , inputs , labels ):
        if self.args.label_smoothing == 0:
            if self.data_args is not None and self.data_args.ignore_pad_token_for_loss:
                # force training to ignore pad token
                logits = model(**inputs , use_cache=False )[0]
                loss = self.loss_fn(logits.view(-1 , logits.shape[-1] ) , labels.view(-1 ) )
            else:
                # compute usual loss via models
                loss , logits = model(**inputs , labels=labels , use_cache=False )[:2]
        else:
            # compute label smoothed loss
            logits = model(**inputs , use_cache=False )[0]
            lprobs = torch.nn.functional.log_softmax(logits , dim=-1 )
            loss , _ = self.loss_fn(lprobs , labels , self.args.label_smoothing , ignore_index=self.config.pad_token_id )
        return loss, logits
    def compute_loss (self , model , inputs ):
        labels = inputs.pop('''labels''' )
        loss , _ = self._compute_loss(model , inputs , labels )
        return loss
    def prediction_step (self , model : nn.Module , inputs : Dict[str, Union[torch.Tensor, Any]] , prediction_loss_only : bool , ignore_keys : Optional[List[str]] = None , ):
        inputs = self._prepare_inputs(inputs )
        gen_kwargs = {
            '''max_length''': self.data_args.val_max_target_length
            if self.data_args is not None
            else self.config.max_length,
            '''num_beams''': self.data_args.eval_beams if self.data_args is not None else self.config.num_beams,
        }
        if self.args.predict_with_generate and not self.args.prediction_loss_only:
            generated_tokens = self.model.generate(
                inputs['''input_ids'''] , attention_mask=inputs['''attention_mask'''] , **gen_kwargs , )
            # in case the batch is shorter than max length, the output should be padded
            if generated_tokens.shape[-1] < gen_kwargs["max_length"]:
                generated_tokens = self._pad_tensors_to_max_len(generated_tokens , gen_kwargs['''max_length'''] )
        labels = inputs.pop('''labels''' )
        with torch.no_grad():
            # compute loss on predict data
            loss , logits = self._compute_loss(model , inputs , labels )
        loss = loss.mean().detach()
        if self.args.prediction_loss_only:
            return (loss, None, None)
        logits = generated_tokens if self.args.predict_with_generate else logits
        if labels.shape[-1] < gen_kwargs["max_length"]:
            labels = self._pad_tensors_to_max_len(labels , gen_kwargs['''max_length'''] )
        return (loss, logits, labels)
    def _pad_tensors_to_max_len (self , tensor , max_length ):
        pad_token_id = self.config.pad_token_id if self.config.pad_token_id is not None else self.config.eos_token_id
        if pad_token_id is None:
            raise ValueError(
                '''Make sure that either `config.pad_token_id` or `config.eos_token_id` is defined if tensor has to be'''
                f" padded to `max_length`={max_length}" )
        padded_tensor = pad_token_id * torch.ones(
            (tensor.shape[0], max_length) , dtype=tensor.dtype , device=tensor.device )
        padded_tensor[:, : tensor.shape[-1]] = tensor
return padded_tensor
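# Sketch of the helper above: a (batch_size, seq_len) tensor padded to
# max_length keeps its original values in [:, :seq_len] and fills the remaining
# positions with the pad (or, as a fallback, eos) token id.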
| 357
|
import os
import unittest
from transformers.models.phobert.tokenization_phobert import VOCAB_FILES_NAMES, PhobertTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class PhobertTokenizationTest ( TokenizerTesterMixin ,unittest.TestCase ):
    tokenizer_class = PhobertTokenizer
    test_rust_tokenizer = False
    def setUp (self ):
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = ['''T@@''', '''i''', '''I''', '''R@@''', '''r''', '''e@@''']
        vocab_tokens = dict(zip(vocab , range(len(vocab ) ) ) )
        merges = ['''#version: 0.2''', '''l à</w>''']
        self.special_tokens_map = {'''unk_token''': '''<unk>'''}
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
        self.merges_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
        with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
            for token in vocab_tokens:
                fp.write(f"{token} {vocab_tokens[token]}\n" )
        with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
            fp.write('''\n'''.join(merges ) )
    def get_tokenizer (self , **kwargs ):
        kwargs.update(self.special_tokens_map )
        return PhobertTokenizer.from_pretrained(self.tmpdirname , **kwargs )
    def get_input_output_texts (self , tokenizer ):
        input_text = '''Tôi là VinAI Research'''
        output_text = '''T<unk> i <unk> <unk> <unk> <unk> <unk> <unk> I Re<unk> e<unk> <unk> <unk> <unk>'''
return input_text, output_text
    def test_full_tokenizer (self ):
        tokenizer = PhobertTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
        text = '''Tôi là VinAI Research'''
        bpe_tokens = '''T@@ ô@@ i l@@ à V@@ i@@ n@@ A@@ I R@@ e@@ s@@ e@@ a@@ r@@ c@@ h'''.split()
        tokens = tokenizer.tokenize(text )
        print(tokens )
        self.assertListEqual(tokens , bpe_tokens )
        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [4, 3, 5, 3, 3, 3, 3, 3, 3, 6, 7, 9, 3, 9, 3, 3, 3, 3, 3]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens ) , input_bpe_tokens )
| 90
| 0
|
import os
import tempfile
import unittest
from transformers.models.marian.convert_marian_tatoeba_to_pytorch import DEFAULT_REPO, TatoebaConverter
from transformers.testing_utils import slow
from transformers.utils import cached_property
@unittest.skipUnless(os.path.exists(DEFAULT_REPO ) ,'''Tatoeba directory does not exist.''' )
class _A ( unittest.TestCase ):
@cached_property
    def resolver ( self ):
'''simple docstring'''
__a = tempfile.mkdtemp()
        return TatoebaConverter(save_dir=__a)
@slow
    def test_resolver ( self ):
'''simple docstring'''
self.resolver.convert_models(['''heb-eng'''])
@slow
    def test_model_card ( self ):
        '''simple docstring'''
        content , mmeta = self.resolver.write_model_card('''opus-mt-he-en''' , dry_run=True)
assert mmeta["long_pair"] == "heb-eng"
| 49
|
import argparse
from transformers import BigBirdConfig, BigBirdForPreTraining, BigBirdForQuestionAnswering, load_tf_weights_in_big_bird
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch( tf_checkpoint_path , big_bird_config_file , pytorch_dump_path , is_trivia_qa ):
    # Initialise PyTorch model
    config = BigBirdConfig.from_json_file(big_bird_config_file )
    print(f'Building PyTorch model from configuration: {config}' )
    if is_trivia_qa:
        model = BigBirdForQuestionAnswering(config )
    else:
        model = BigBirdForPreTraining(config )
    # Load weights from tf checkpoint
    load_tf_weights_in_big_bird(model , tf_checkpoint_path , is_trivia_qa=is_trivia_qa )
    # Save pytorch-model
    print(f'Save PyTorch model to {pytorch_dump_path}' )
    model.save_pretrained(pytorch_dump_path )
if __name__ == "__main__":
__snake_case :Tuple = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--big_bird_config_file''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained BERT model. \n'''
'''This specifies the model architecture.'''
),
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--is_trivia_qa''', action='''store_true''', help='''Whether to convert a model with a trivia_qa head.'''
)
__snake_case :Any = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.big_bird_config_file, args.pytorch_dump_path, args.is_trivia_qa
)
| 49
| 1
|
"""simple docstring"""
import argparse
import logging
import pickle
from collections import Counter
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''', datefmt='''%m/%d/%Y %H:%M:%S''', level=logging.INFO
)
__UpperCamelCase = logging.getLogger(__name__)
if __name__ == "__main__":
__UpperCamelCase = argparse.ArgumentParser(
description='''Token Counts for smoothing the masking probabilities in MLM (cf XLM/word2vec)'''
)
parser.add_argument(
'''--data_file''', type=str, default='''data/dump.bert-base-uncased.pickle''', help='''The binarized dataset.'''
)
parser.add_argument(
'''--token_counts_dump''', type=str, default='''data/token_counts.bert-base-uncased.pickle''', help='''The dump file.'''
)
parser.add_argument('''--vocab_size''', default=30522, type=int)
__UpperCamelCase = parser.parse_args()
logger.info(f'''Loading data from {args.data_file}''')
with open(args.data_file, '''rb''') as fp:
        data = pickle.load(fp)
    logger.info('''Counting occurrences for MLM.''')
    counter = Counter()
    for tk_ids in data:
        counter.update(tk_ids)
    counts = [0] * args.vocab_size
    for k, v in counter.items():
        counts[k] = v
logger.info(f'''Dump to {args.token_counts_dump}''')
with open(args.token_counts_dump, '''wb''') as handle:
pickle.dump(counts, handle, protocol=pickle.HIGHEST_PROTOCOL)
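    # After this point counts[token_id] holds the corpus frequency of every
    # vocabulary id; per the argparse description above, the pickled dump is
    # later used to smooth the masking probabilities for MLM training.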
| 38
|
"""simple docstring"""
from __future__ import annotations
from collections.abc import MutableSequence
class Polynomial :
'''simple docstring'''
    def __init__( self , degree , coefficients ) -> None:
        if len(coefficients ) != degree + 1:
            raise ValueError(
                'The number of coefficients should be equal to the degree + 1.' )
        self.coefficients = list(coefficients )
        self.degree = degree
    def __add__( self , polynomial_a ) -> Polynomial:
        if self.degree > polynomial_a.degree:
            coefficients = self.coefficients[:]
            for i in range(polynomial_a.degree + 1 ):
                coefficients[i] += polynomial_a.coefficients[i]
            return Polynomial(self.degree , coefficients )
        else:
            coefficients = polynomial_a.coefficients[:]
            for i in range(self.degree + 1 ):
                coefficients[i] += self.coefficients[i]
            return Polynomial(polynomial_a.degree , coefficients )
    def __sub__( self , polynomial_a ) -> Polynomial:
return self + polynomial_a * Polynomial(0 , [-1] )
def __neg__( self ) -> Polynomial:
return Polynomial(self.degree , [-c for c in self.coefficients] )
    def __mul__( self , polynomial_a ) -> Polynomial:
        coefficients = [0] * (self.degree + polynomial_a.degree + 1)
        for i in range(self.degree + 1 ):
            for j in range(polynomial_a.degree + 1 ):
                coefficients[i + j] += (
                    self.coefficients[i] * polynomial_a.coefficients[j]
                )
        return Polynomial(self.degree + polynomial_a.degree , coefficients )
    def evaluate ( self , substitution ) -> int | float:
        result = 0
        for i in range(self.degree + 1 ):
            result += self.coefficients[i] * (substitution**i)
        return result
def __str__( self ) -> str:
        polynomial = ''
for i in range(self.degree , -1 , -1 ):
if self.coefficients[i] == 0:
continue
elif self.coefficients[i] > 0:
if polynomial:
polynomial += " + "
else:
polynomial += " - "
if i == 0:
polynomial += str(abs(self.coefficients[i] ) )
elif i == 1:
polynomial += str(abs(self.coefficients[i] ) ) + "x"
else:
polynomial += str(abs(self.coefficients[i] ) ) + "x^" + str(lowerCAmelCase__ )
return polynomial
def __repr__( self ) -> str:
return self.__str__()
    def derivative ( self ) -> Polynomial:
        coefficients = [0] * self.degree
        for i in range(self.degree ):
            coefficients[i] = self.coefficients[i + 1] * (i + 1)
        return Polynomial(self.degree - 1 , coefficients )
    def integral ( self , constant = 0 ) -> Polynomial:
        coefficients = [0] * (self.degree + 2)
        coefficients[0] = constant
        for i in range(self.degree + 1 ):
            coefficients[i + 1] = self.coefficients[i] / (i + 1)
        return Polynomial(self.degree + 1 , coefficients )
    def __eq__( self , polynomial_a ) -> bool:
        if not isinstance(polynomial_a , Polynomial ):
return False
if self.degree != polynomial_a.degree:
return False
for i in range(self.degree + 1 ):
if self.coefficients[i] != polynomial_a.coefficients[i]:
return False
return True
def __ne__( self , lowerCAmelCase__ ) -> bool:
return not self.__eq__(lowerCAmelCase__ )
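# A minimal usage sketch for the class above (values worked by hand):
# Polynomial(2 , [1, 2, 3]) represents 1 + 2x + 3x^2.
if __name__ == "__main__":
    p = Polynomial(2 , [1, 2, 3] )
    assert p.evaluate(2 ) == 17  # 1 + 2*2 + 3*4
    assert str(p.derivative() ) == "6x + 2"  # d/dx (1 + 2x + 3x^2) = 2 + 6x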
| 38
| 1
|
import argparse
import json
import os
import torch
from transformers import LukeConfig, LukeModel, LukeTokenizer, RobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
@torch.no_grad()
def convert_luke_checkpoint ( checkpoint_path , metadata_path , entity_vocab_path , pytorch_dump_folder_path , model_size ):
# Load configuration defined in the metadata file
with open(_lowerCamelCase ) as metadata_file:
UpperCamelCase__ : Tuple = json.load(_lowerCamelCase )
UpperCamelCase__ : Dict = LukeConfig(use_entity_aware_attention=_lowerCamelCase , **metadata['''model_config'''] )
# Load in the weights from the checkpoint_path
UpperCamelCase__ : List[Any] = torch.load(_lowerCamelCase , map_location='''cpu''' )
# Load the entity vocab file
UpperCamelCase__ : Optional[int] = load_entity_vocab(_lowerCamelCase )
UpperCamelCase__ : List[Any] = RobertaTokenizer.from_pretrained(metadata['''model_config''']['''bert_model_name'''] )
# Add special tokens to the token vocabulary for downstream tasks
UpperCamelCase__ : Optional[Any] = AddedToken('''<ent>''' , lstrip=_lowerCamelCase , rstrip=_lowerCamelCase )
UpperCamelCase__ : Optional[int] = AddedToken('''<ent2>''' , lstrip=_lowerCamelCase , rstrip=_lowerCamelCase )
tokenizer.add_special_tokens({'''additional_special_tokens''': [entity_token_a, entity_token_a]} )
config.vocab_size += 2
print(f"Saving tokenizer to {pytorch_dump_folder_path}" )
tokenizer.save_pretrained(_lowerCamelCase )
with open(os.path.join(_lowerCamelCase , LukeTokenizer.vocab_files_names['''entity_vocab_file'''] ) , '''w''' ) as f:
json.dump(_lowerCamelCase , _lowerCamelCase )
UpperCamelCase__ : List[Any] = LukeTokenizer.from_pretrained(_lowerCamelCase )
# Initialize the embeddings of the special tokens
UpperCamelCase__ : Optional[Any] = state_dict["""embeddings.word_embeddings.weight"""]
UpperCamelCase__ : Optional[Any] = word_emb[tokenizer.convert_tokens_to_ids(['''@'''] )[0]].unsqueeze(0 )
UpperCamelCase__ : Dict = word_emb[tokenizer.convert_tokens_to_ids(['''#'''] )[0]].unsqueeze(0 )
UpperCamelCase__ : Tuple = torch.cat([word_emb, ent_emb, enta_emb] )
# Initialize the query layers of the entity-aware self-attention mechanism
for layer_index in range(config.num_hidden_layers ):
for matrix_name in ["query.weight", "query.bias"]:
UpperCamelCase__ : Any = f"encoder.layer.{layer_index}.attention.self."
UpperCamelCase__ : str = state_dict[prefix + matrix_name]
UpperCamelCase__ : Dict = state_dict[prefix + matrix_name]
UpperCamelCase__ : Optional[Any] = state_dict[prefix + matrix_name]
# Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
UpperCamelCase__ : Union[str, Any] = state_dict["""entity_embeddings.entity_embeddings.weight"""]
UpperCamelCase__ : Dict = entity_emb[entity_vocab["""[MASK]"""]]
UpperCamelCase__ : int = LukeModel(config=_lowerCamelCase ).eval()
UpperCamelCase__ : str = model.load_state_dict(_lowerCamelCase , strict=_lowerCamelCase )
if not (len(_lowerCamelCase ) == 1 and missing_keys[0] == "embeddings.position_ids"):
raise ValueError(f"Missing keys {', '.join(_lowerCamelCase )}. Expected only missing embeddings.position_ids" )
if not (all(key.startswith('''entity_predictions''' ) or key.startswith('''lm_head''' ) for key in unexpected_keys )):
raise ValueError(
'''Unexpected keys'''
f" {', '.join([key for key in unexpected_keys if not (key.startswith('entity_predictions' ) or key.startswith('lm_head' ))] )}" )
# Check outputs
UpperCamelCase__ : List[Any] = LukeTokenizer.from_pretrained(_lowerCamelCase , task='''entity_classification''' )
UpperCamelCase__ : int = (
"""Top seed Ana Ivanovic said on Thursday she could hardly believe her luck as a fortuitous netcord helped the"""
""" new world number one avoid a humiliating second- round exit at Wimbledon ."""
)
UpperCamelCase__ : Dict = (39, 42)
UpperCamelCase__ : Optional[Any] = tokenizer(_lowerCamelCase , entity_spans=[span] , add_prefix_space=_lowerCamelCase , return_tensors='''pt''' )
UpperCamelCase__ : List[str] = model(**_lowerCamelCase )
# Verify word hidden states
if model_size == "large":
UpperCamelCase__ : List[str] = torch.Size((1, 42, 1024) )
UpperCamelCase__ : List[str] = torch.tensor(
[[0.0133, 0.0865, 0.0095], [0.3093, -0.2576, -0.7418], [-0.1720, -0.2117, -0.2869]] )
else: # base
UpperCamelCase__ : int = torch.Size((1, 42, 768) )
UpperCamelCase__ : Tuple = torch.tensor([[0.0037, 0.1368, -0.0091], [0.1099, 0.3329, -0.1095], [0.0765, 0.5335, 0.1179]] )
if not (outputs.last_hidden_state.shape == expected_shape):
raise ValueError(
f"Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}" )
if not torch.allclose(outputs.last_hidden_state[0, :3, :3] , _lowerCamelCase , atol=1e-4 ):
raise ValueError
# Verify entity hidden states
if model_size == "large":
UpperCamelCase__ : Optional[int] = torch.Size((1, 1, 1024) )
UpperCamelCase__ : Tuple = torch.tensor([[0.0466, -0.0106, -0.0179]] )
else: # base
UpperCamelCase__ : Optional[int] = torch.Size((1, 1, 768) )
UpperCamelCase__ : Dict = torch.tensor([[0.1457, 0.1044, 0.0174]] )
    if not (outputs.entity_last_hidden_state.shape == expected_shape):
raise ValueError(
f"Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is"
f" {expected_shape}" )
if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3] , _lowerCamelCase , atol=1e-4 ):
raise ValueError
# Finally, save our PyTorch model and tokenizer
print('''Saving PyTorch model to {}'''.format(_lowerCamelCase ) )
model.save_pretrained(_lowerCamelCase )
def load_entity_vocab ( entity_vocab_path ):
    entity_vocab = {}
    with open(entity_vocab_path , '''r''' , encoding='''utf-8''' ) as f:
        for index, line in enumerate(f ):
            title, _ = line.rstrip().split('''\t''' )
            entity_vocab[title] = index
return entity_vocab
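# Sketch of the expected file format, inferred from the parsing above: each
# line of entity_vocab.tsv is '<entity title>\t<count>', and only the title and
# its zero-based row index are kept in the returned mapping.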
if __name__ == "__main__":
UpperCAmelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument('--checkpoint_path', type=str, help='Path to a pytorch_model.bin file.')
parser.add_argument(
'--metadata_path', default=None, type=str, help='Path to a metadata.json file, defining the configuration.'
)
parser.add_argument(
'--entity_vocab_path',
default=None,
type=str,
help='Path to an entity_vocab.tsv file, containing the entity vocabulary.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to where to dump the output PyTorch model.'
)
parser.add_argument(
'--model_size', default='base', type=str, choices=['base', 'large'], help='Size of the model to be converted.'
)
UpperCAmelCase_ = parser.parse_args()
convert_luke_checkpoint(
args.checkpoint_path,
args.metadata_path,
args.entity_vocab_path,
args.pytorch_dump_folder_path,
args.model_size,
)
| 201
|
"""simple docstring"""
import unittest
from transformers import DebertaVaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DebertaVaForMaskedLM,
DebertaVaForMultipleChoice,
DebertaVaForQuestionAnswering,
DebertaVaForSequenceClassification,
DebertaVaForTokenClassification,
DebertaVaModel,
)
from transformers.models.deberta_va.modeling_deberta_va import DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST
class DebertaVaModelTester ( object ):
    def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_input_mask=True , use_token_type_ids=True , use_labels=True , vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.0_2 , relative_attention=False , position_biased_input=True , pos_att_type="None" , num_labels=3 , num_choices=4 , scope=None , ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.relative_attention = relative_attention
        self.position_biased_input = position_biased_input
        self.pos_att_type = pos_att_type
        self.scope = scope
    def prepare_config_and_inputs ( self ):
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        input_mask = None
        if self.use_input_mask:
            input_mask = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            choice_labels = ids_tensor([self.batch_size] , self.num_choices )
        config = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config ( self ):
return DebertaVaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , relative_attention=self.relative_attention , position_biased_input=self.position_biased_input , pos_att_type=self.pos_att_type , )
    def check_loss_output ( self , result ):
        self.parent.assertListEqual(list(result.loss.size() ) , [] )
def __A ( self , a__ , a__ , a__ , a__ , a__ , a__ , a__ ):
_lowerCAmelCase : Union[str, Any] = DebertaVaModel(config=a__ )
model.to(a__ )
model.eval()
_lowerCAmelCase : List[Any] = model(a__ , attention_mask=a__ , token_type_ids=a__ )[0]
_lowerCAmelCase : List[Any] = model(a__ , token_type_ids=a__ )[0]
_lowerCAmelCase : Any = model(a__ )[0]
self.parent.assertListEqual(list(sequence_output.size() ) , [self.batch_size, self.seq_length, self.hidden_size] )
def __A ( self , a__ , a__ , a__ , a__ , a__ , a__ , a__ ):
_lowerCAmelCase : List[str] = DebertaVaForMaskedLM(config=a__ )
model.to(a__ )
model.eval()
_lowerCAmelCase : Optional[Any] = model(a__ , attention_mask=a__ , token_type_ids=a__ , labels=a__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __A ( self , a__ , a__ , a__ , a__ , a__ , a__ , a__ ):
_lowerCAmelCase : int = self.num_labels
_lowerCAmelCase : int = DebertaVaForSequenceClassification(a__ )
model.to(a__ )
model.eval()
_lowerCAmelCase : Optional[Any] = model(a__ , attention_mask=a__ , token_type_ids=a__ , labels=a__ )
self.parent.assertListEqual(list(result.logits.size() ) , [self.batch_size, self.num_labels] )
self.check_loss_output(a__ )
def __A ( self , a__ , a__ , a__ , a__ , a__ , a__ , a__ ):
_lowerCAmelCase : List[Any] = self.num_labels
_lowerCAmelCase : str = DebertaVaForTokenClassification(config=a__ )
model.to(a__ )
model.eval()
_lowerCAmelCase : Optional[Any] = model(a__ , attention_mask=a__ , token_type_ids=a__ , labels=a__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __A ( self , a__ , a__ , a__ , a__ , a__ , a__ , a__ ):
_lowerCAmelCase : Any = DebertaVaForQuestionAnswering(config=a__ )
model.to(a__ )
model.eval()
_lowerCAmelCase : Dict = model(
a__ , attention_mask=a__ , token_type_ids=a__ , start_positions=a__ , end_positions=a__ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __A ( self , a__ , a__ , a__ , a__ , a__ , a__ , a__ ):
_lowerCAmelCase : Union[str, Any] = DebertaVaForMultipleChoice(config=a__ )
model.to(a__ )
model.eval()
_lowerCAmelCase : Optional[int] = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_lowerCAmelCase : Optional[Any] = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_lowerCAmelCase : str = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_lowerCAmelCase : List[str] = model(
a__ , attention_mask=a__ , token_type_ids=a__ , labels=a__ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
    def prepare_config_and_inputs_for_common ( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask}
        return config, inputs_dict
@require_torch
class DebertaVaModelTest ( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (
(
DebertaVaModel,
DebertaVaForMaskedLM,
DebertaVaForSequenceClassification,
DebertaVaForTokenClassification,
DebertaVaForQuestionAnswering,
DebertaVaForMultipleChoice,
)
if is_torch_available()
else ()
)
    pipeline_model_mapping = (
{
"feature-extraction": DebertaVaModel,
"fill-mask": DebertaVaForMaskedLM,
"question-answering": DebertaVaForQuestionAnswering,
"text-classification": DebertaVaForSequenceClassification,
"token-classification": DebertaVaForTokenClassification,
"zero-shot": DebertaVaForSequenceClassification,
}
if is_torch_available()
else {}
)
    fx_compatible = True
    test_torchscript = False
    test_pruning = False
    test_head_masking = False
    is_encoder_decoder = False
    def setUp ( self ):
        self.model_tester = DebertaVaModelTester(self )
        self.config_tester = ConfigTester(self , config_class=DebertaVaConfig , hidden_size=37 )
def __A ( self ):
self.config_tester.run_common_tests()
def __A ( self ):
_lowerCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_model(*a__ )
def __A ( self ):
_lowerCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_sequence_classification(*a__ )
def __A ( self ):
_lowerCAmelCase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_masked_lm(*a__ )
def __A ( self ):
_lowerCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_question_answering(*a__ )
def __A ( self ):
_lowerCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_token_classification(*a__ )
def __A ( self ):
_lowerCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_multiple_choice(*a__ )
@slow
def __A ( self ):
for model_name in DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowerCAmelCase : Tuple = DebertaVaModel.from_pretrained(a__ )
self.assertIsNotNone(a__ )
@require_torch
@require_sentencepiece
@require_tokenizers
class DebertaVaModelIntegrationTest ( unittest.TestCase ):
@unittest.skip(reason="""Model not available yet""" )
def __A ( self ):
pass
@slow
    def test_inference_no_head ( self ):
        model = DebertaVaModel.from_pretrained("""microsoft/deberta-v2-xlarge""" )
        input_ids = torch.tensor([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]] )
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
        with torch.no_grad():
            output = model(input_ids , attention_mask=attention_mask )[0]
        # compare the actual values for a slice.
        expected_slice = torch.tensor(
            [[[0.2_3_5_6, 0.1_9_4_8, 0.0_3_6_9], [-0.1_0_6_3, 0.3_5_8_6, -0.5_1_5_2], [-0.6_3_9_9, -0.0_2_5_9, -0.2_5_2_5]]] )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , expected_slice , atol=1e-4 ) , F"{output[:, 1:4, 1:4]}" )
| 44
| 0
|
"""simple docstring"""
class Things:
    def __init__( self , name , value , weight):
        '''simple docstring'''
        self.name = name
        self.value = value
        self.weight = weight
    def __repr__( self):
        '''simple docstring'''
        return F'''{self.__class__.__name__}({self.name}, {self.value}, {self.weight})'''
    def get_value ( self):
        '''simple docstring'''
        return self.value
    def get_name ( self):
        '''simple docstring'''
        return self.name
    def get_weight ( self):
        '''simple docstring'''
        return self.weight
    def value_weight ( self):
        '''simple docstring'''
        return self.value / self.weight
def build_menu ( name, value, weight ):
    """simple docstring"""
    menu = []
    for i in range(len(value ) ):
        menu.append(Things(name[i], value[i], weight[i] ) )
    return menu
def greedy ( items, max_cost, key_func ):
    """simple docstring"""
    items_copy = sorted(items, key=key_func, reverse=True )
    result = []
    total_value , total_cost = 0.0, 0.0
    for i in range(len(items_copy ) ):
        if (total_cost + items_copy[i].get_weight()) <= max_cost:
            result.append(items_copy[i] )
            total_cost += items_copy[i].get_weight()
            total_value += items_copy[i].get_value()
    return (result, total_value)
def test_greedy ( ):
"""simple docstring"""
if __name__ == "__main__":
import doctest
doctest.testmod()
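    # A minimal demo of the helpers above (hypothetical menu values):
    menu = build_menu(["burger", "pizza", "cola"], [80, 100, 60], [40, 10, 20] )
    chosen, total_value = greedy(menu, 60.0, Things.get_value )
    print(chosen, total_value )  # greedily picks by value within the 60.0 weight budget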
| 100
|
"""simple docstring"""
from __future__ import annotations
import unittest
import numpy as np
from transformers import LayoutLMConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.layoutlm.modeling_tf_layoutlm import (
TF_LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFLayoutLMForMaskedLM,
TFLayoutLMForQuestionAnswering,
TFLayoutLMForSequenceClassification,
TFLayoutLMForTokenClassification,
TFLayoutLMModel,
)
class TFLayoutLMModelTester:
    def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_input_mask=True , use_token_type_ids=True , use_labels=True , vocab_size=99 , hidden_size=32 , num_hidden_layers=2 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=5_12 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.02 , num_labels=3 , num_choices=4 , scope=None , range_bbox=10_00 , ):
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.range_bbox = range_bbox
    def prepare_config_and_inputs ( self) -> Tuple:
'''simple docstring'''
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
        # convert bbox to numpy since TF does not support item assignment
        bbox = ids_tensor([self.batch_size, self.seq_length, 4] , self.range_bbox).numpy()
        # Ensure that bbox is legal
        for i in range(bbox.shape[0]):
            for j in range(bbox.shape[1]):
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    t = bbox[i, j, 3]
                    bbox[i, j, 3] = bbox[i, j, 1]
                    bbox[i, j, 1] = t
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    t = bbox[i, j, 2]
                    bbox[i, j, 2] = bbox[i, j, 0]
                    bbox[i, j, 0] = t
        bbox = tf.convert_to_tensor(bbox)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size)
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels)
            choice_labels = ids_tensor([self.batch_size] , self.num_choices)
        config = LayoutLMConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )
return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def UpperCAmelCase ( self , __a , __a , __a , __a , __a , __a , __a , __a) -> List[str]:
'''simple docstring'''
_UpperCamelCase = TFLayoutLMModel(config=__a)
_UpperCamelCase = model(__a , __a , attention_mask=__a , token_type_ids=__a)
_UpperCamelCase = model(__a , __a , token_type_ids=__a)
_UpperCamelCase = model(__a , __a)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size))
def UpperCAmelCase ( self , __a , __a , __a , __a , __a , __a , __a , __a) -> Union[str, Any]:
'''simple docstring'''
_UpperCamelCase = TFLayoutLMForMaskedLM(config=__a)
_UpperCamelCase = model(__a , __a , attention_mask=__a , token_type_ids=__a , labels=__a)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
def UpperCAmelCase ( self , __a , __a , __a , __a , __a , __a , __a , __a) -> int:
'''simple docstring'''
_UpperCamelCase = self.num_labels
_UpperCamelCase = TFLayoutLMForSequenceClassification(config=__a)
_UpperCamelCase = model(__a , __a , attention_mask=__a , token_type_ids=__a)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels))
def UpperCAmelCase ( self , __a , __a , __a , __a , __a , __a , __a , __a) -> Union[str, Any]:
'''simple docstring'''
_UpperCamelCase = self.num_labels
_UpperCamelCase = TFLayoutLMForTokenClassification(config=__a)
_UpperCamelCase = model(__a , __a , attention_mask=__a , token_type_ids=__a , labels=__a)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels))
def UpperCAmelCase ( self , __a , __a , __a , __a , __a , __a , __a , __a) -> Union[str, Any]:
'''simple docstring'''
_UpperCamelCase = TFLayoutLMForQuestionAnswering(config=__a)
_UpperCamelCase = model(__a , __a , attention_mask=__a , token_type_ids=__a)
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length))
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length))
def UpperCAmelCase ( self) -> List[Any]:
'''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            bbox,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {
'''input_ids''': input_ids,
'''bbox''': bbox,
'''token_type_ids''': token_type_ids,
'''attention_mask''': input_mask,
}
return config, inputs_dict
@require_tf
class _UpperCAmelCase( lowerCamelCase , lowerCamelCase , unittest.TestCase ):
lowercase__ = (
(
TFLayoutLMModel,
TFLayoutLMForMaskedLM,
TFLayoutLMForTokenClassification,
TFLayoutLMForSequenceClassification,
TFLayoutLMForQuestionAnswering,
)
if is_tf_available()
else ()
)
lowercase__ = (
{
'feature-extraction': TFLayoutLMModel,
'fill-mask': TFLayoutLMForMaskedLM,
'text-classification': TFLayoutLMForSequenceClassification,
'token-classification': TFLayoutLMForTokenClassification,
'zero-shot': TFLayoutLMForSequenceClassification,
}
if is_tf_available()
else {}
)
lowercase__ = False
lowercase__ = True
lowercase__ = 10
def UpperCAmelCase ( self) -> Union[str, Any]:
'''simple docstring'''
_UpperCamelCase = TFLayoutLMModelTester(self)
_UpperCamelCase = ConfigTester(self , config_class=__a , hidden_size=37)
def UpperCAmelCase ( self) -> str:
'''simple docstring'''
self.config_tester.run_common_tests()
def UpperCAmelCase ( self) -> int:
'''simple docstring'''
_UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__a)
def UpperCAmelCase ( self) -> Dict:
'''simple docstring'''
_UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*__a)
def UpperCAmelCase ( self) -> int:
'''simple docstring'''
_UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*__a)
def UpperCAmelCase ( self) -> Optional[int]:
'''simple docstring'''
_UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*__a)
def UpperCAmelCase ( self) -> Tuple:
'''simple docstring'''
_UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*__a)
@slow
def UpperCAmelCase ( self) -> str:
'''simple docstring'''
for model_name in TF_LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_UpperCamelCase = TFLayoutLMModel.from_pretrained(__a)
self.assertIsNotNone(__a)
@unittest.skip('''Onnx compliancy broke with TF 2.10''')
def UpperCAmelCase ( self) -> List[str]:
'''simple docstring'''
pass
def prepare_layoutlm_batch_inputs():
    """Build a fixed batch of two 25-token sequences with attention masks, normalized (0-1000) bounding boxes, token type ids and token-level labels."""
    # fmt: off
_UpperCamelCase = tf.convert_to_tensor([[1_01,10_19,10_14,10_16,10_37,1_28_49,47_47,10_04,1_42_46,22_78,54_39,45_24,50_02,29_30,21_93,29_30,43_41,32_08,10_05,10_55,21_71,28_48,1_13_00,35_31,1_02],[1_01,40_70,40_34,70_20,10_24,30_58,10_15,10_13,28_61,10_13,60_70,1_92_74,27_72,62_05,2_78_14,1_61_47,1_61_47,43_43,20_47,1_02_83,1_09_69,1_43_89,10_12,23_38,1_02]] ) # noqa: E231
_UpperCamelCase = tf.convert_to_tensor([[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],] ) # noqa: E231
_UpperCamelCase = tf.convert_to_tensor([[[0,0,0,0],[4_23,2_37,4_40,2_51],[4_27,2_72,4_41,2_87],[4_19,1_15,4_37,1_29],[9_61,8_85,9_92,9_12],[2_56,38,3_30,58],[2_56,38,3_30,58],[3_36,42,3_53,57],[3_60,39,4_01,56],[3_60,39,4_01,56],[4_11,39,4_71,59],[4_79,41,5_28,59],[5_33,39,6_30,60],[67,1_13,1_34,1_31],[1_41,1_15,2_09,1_32],[68,1_49,1_33,1_66],[1_41,1_49,1_87,1_64],[1_95,1_48,2_87,1_65],[1_95,1_48,2_87,1_65],[1_95,1_48,2_87,1_65],[2_95,1_48,3_49,1_65],[4_41,1_49,4_92,1_66],[4_97,1_49,5_46,1_64],[64,2_01,1_25,2_18],[10_00,10_00,10_00,10_00]],[[0,0,0,0],[6_62,1_50,7_54,1_66],[6_65,1_99,7_42,2_11],[5_19,2_13,5_54,2_28],[5_19,2_13,5_54,2_28],[1_34,4_33,1_87,4_54],[1_30,4_67,2_04,4_80],[1_30,4_67,2_04,4_80],[1_30,4_67,2_04,4_80],[1_30,4_67,2_04,4_80],[1_30,4_67,2_04,4_80],[3_14,4_69,3_76,4_82],[5_04,6_84,5_82,7_06],[9_41,8_25,9_73,9_00],[9_41,8_25,9_73,9_00],[9_41,8_25,9_73,9_00],[9_41,8_25,9_73,9_00],[6_10,7_49,6_52,7_65],[1_30,6_59,1_68,6_72],[1_76,6_57,2_37,6_72],[2_38,6_57,3_12,6_72],[4_43,6_53,6_28,6_72],[4_43,6_53,6_28,6_72],[7_16,3_01,8_25,3_17],[10_00,10_00,10_00,10_00]]] ) # noqa: E231
_UpperCamelCase = tf.convert_to_tensor([[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]] ) # noqa: E231
# these are sequence labels (i.e. at the token level)
_UpperCamelCase = tf.convert_to_tensor([[-1_00,10,10,10,9,1,-1_00,7,7,-1_00,7,7,4,2,5,2,8,8,-1_00,-1_00,5,0,3,2,-1_00],[-1_00,12,12,12,-1_00,12,10,-1_00,-1_00,-1_00,-1_00,10,12,9,-1_00,-1_00,-1_00,10,10,10,9,12,-1_00,10,-1_00]] ) # noqa: E231
# fmt: on
return input_ids, attention_mask, bbox, token_type_ids, labels
@require_tf
class _UpperCAmelCase( unittest.TestCase ):
@slow
def UpperCAmelCase ( self) -> Optional[Any]:
'''simple docstring'''
_UpperCamelCase = TFLayoutLMModel.from_pretrained('''microsoft/layoutlm-base-uncased''')
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = prepare_layoutlm_batch_inputs()
# forward pass
_UpperCamelCase = model(input_ids=__a , bbox=__a , attention_mask=__a , token_type_ids=__a)
# test the sequence output on [0, :3, :3]
_UpperCamelCase = tf.convert_to_tensor(
[[0.1785, -0.1947, -0.0425], [-0.3254, -0.2807, 0.2553], [-0.5391, -0.3322, 0.3364]] , )
self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3] , __a , atol=1e-3))
# test the pooled output on [1, :3]
_UpperCamelCase = tf.convert_to_tensor([-0.6580, -0.0214, 0.8552])
self.assertTrue(np.allclose(outputs.pooler_output[1, :3] , __a , atol=1e-3))
@slow
def UpperCAmelCase ( self) -> Any:
'''simple docstring'''
# initialize model with randomly initialized sequence classification head
_UpperCamelCase = TFLayoutLMForSequenceClassification.from_pretrained('''microsoft/layoutlm-base-uncased''' , num_labels=2)
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = prepare_layoutlm_batch_inputs()
# forward pass
_UpperCamelCase = model(
input_ids=__a , bbox=__a , attention_mask=__a , token_type_ids=__a , labels=tf.convert_to_tensor([1, 1]) , )
        # test that we get an unreduced per-sample loss of shape (batch_size,)
_UpperCamelCase = outputs.loss
_UpperCamelCase = (2,)
self.assertEqual(loss.shape , __a)
# test the shape of the logits
_UpperCamelCase = outputs.logits
_UpperCamelCase = (2, 2)
self.assertEqual(logits.shape , __a)
@slow
def UpperCAmelCase ( self) -> List[str]:
'''simple docstring'''
# initialize model with randomly initialized token classification head
_UpperCamelCase = TFLayoutLMForTokenClassification.from_pretrained('''microsoft/layoutlm-base-uncased''' , num_labels=13)
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = prepare_layoutlm_batch_inputs()
# forward pass
_UpperCamelCase = model(
input_ids=__a , bbox=__a , attention_mask=__a , token_type_ids=__a , labels=__a)
# test the shape of the logits
_UpperCamelCase = outputs.logits
_UpperCamelCase = tf.convert_to_tensor((2, 25, 13))
self.assertEqual(logits.shape , __a)
@slow
def UpperCAmelCase ( self) -> Dict:
'''simple docstring'''
        # initialize model with randomly initialized question answering head
_UpperCamelCase = TFLayoutLMForQuestionAnswering.from_pretrained('''microsoft/layoutlm-base-uncased''')
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = prepare_layoutlm_batch_inputs()
# forward pass
_UpperCamelCase = model(input_ids=__a , bbox=__a , attention_mask=__a , token_type_ids=__a)
# test the shape of the logits
_UpperCamelCase = tf.convert_to_tensor((2, 25))
self.assertEqual(outputs.start_logits.shape , __a)
self.assertEqual(outputs.end_logits.shape , __a)
| 100
| 1
|
import json
import os
import unittest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class lowerCamelCase (_snake_case , unittest.TestCase ):
'''simple docstring'''
_snake_case : int = CLIPTokenizer
_snake_case : Tuple = CLIPTokenizerFast
_snake_case : List[Any] = True
_snake_case : Dict = {}
_snake_case : Optional[int] = False
def __UpperCAmelCase ( self ) -> Tuple:
super().setUp()
# fmt: off
UpperCAmelCase_ : str = ['l', 'o', 'w', 'e', 'r', 's', 't', 'i', 'd', 'n', 'lo', 'l</w>', 'w</w>', 'r</w>', 't</w>', 'low</w>', 'er</w>', 'lowest</w>', 'newer</w>', 'wider', '<unk>', '<|startoftext|>', '<|endoftext|>']
# fmt: on
UpperCAmelCase_ : Optional[int] = dict(zip(_UpperCamelCase , range(len(_UpperCamelCase ) ) ) )
UpperCAmelCase_ : Tuple = ['#version: 0.2', 'l o', 'lo w</w>', 'e r</w>']
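        # Each merge rule fuses one adjacent symbol pair into a single token, e.g. 'lo w</w>' combines 'lo' and 'w</w>' into the vocab entry 'low</w>'.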
UpperCAmelCase_ : Any = {'unk_token': '<unk>'}
UpperCAmelCase_ : int = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
UpperCAmelCase_ : int = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp:
fp.write(json.dumps(_UpperCamelCase ) + '\n' )
with open(self.merges_file , 'w' , encoding='utf-8' ) as fp:
fp.write('\n'.join(_UpperCamelCase ) )
def __UpperCAmelCase ( self , **_UpperCamelCase ) -> Optional[Any]:
kwargs.update(self.special_tokens_map )
return CLIPTokenizer.from_pretrained(self.tmpdirname , **_UpperCamelCase )
def __UpperCAmelCase ( self , **_UpperCamelCase ) -> Any:
kwargs.update(self.special_tokens_map )
return CLIPTokenizerFast.from_pretrained(self.tmpdirname , **_UpperCamelCase )
def __UpperCAmelCase ( self , _UpperCamelCase ) -> Any:
UpperCAmelCase_ : List[str] = 'lower newer'
UpperCAmelCase_ : Any = 'lower newer'
return input_text, output_text
def __UpperCAmelCase ( self ) -> List[str]:
UpperCAmelCase_ : List[Any] = CLIPTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
UpperCAmelCase_ : Tuple = 'lower newer'
UpperCAmelCase_ : Any = ['lo', 'w', 'er</w>', 'n', 'e', 'w', 'er</w>']
UpperCAmelCase_ : Optional[int] = tokenizer.tokenize(_UpperCamelCase )
self.assertListEqual(_UpperCamelCase , _UpperCamelCase )
UpperCAmelCase_ : List[Any] = tokens + [tokenizer.unk_token]
UpperCAmelCase_ : List[Any] = [1_0, 2, 1_6, 9, 3, 2, 1_6, 2_0]
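        # The expected ids are each token's index in the toy vocab from setUp: 'lo' -> 10, 'w' -> 2, 'er</w>' -> 16, ..., '<unk>' -> 20.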
self.assertListEqual(tokenizer.convert_tokens_to_ids(_UpperCamelCase ) , _UpperCamelCase )
@require_ftfy
def __UpperCAmelCase ( self ) -> Optional[Any]:
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})" ):
UpperCAmelCase_ : List[Any] = self.tokenizer_class.from_pretrained(_UpperCamelCase , **_UpperCamelCase )
UpperCAmelCase_ : int = self.rust_tokenizer_class.from_pretrained(_UpperCamelCase , **_UpperCamelCase )
UpperCAmelCase_ : Optional[int] = 'A\n\'ll 11p223RF☆ho!!to?\'d\'d\'\'d of a cat to-$\'\'d.'
UpperCAmelCase_ : str = tokenizer_s.tokenize(_UpperCamelCase )
UpperCAmelCase_ : Optional[Any] = tokenizer_r.tokenize(_UpperCamelCase )
self.assertListEqual(_UpperCamelCase , _UpperCamelCase )
# Test that the tokenization is identical on an example containing a character (Latin Small Letter A
# with Tilde) encoded in 2 different ways
UpperCAmelCase_ : Any = 'xa\u0303y' + ' ' + 'x\xe3y'
UpperCAmelCase_ : Optional[int] = tokenizer_s.tokenize(_UpperCamelCase )
UpperCAmelCase_ : Any = tokenizer_r.tokenize(_UpperCamelCase )
self.assertListEqual(_UpperCamelCase , _UpperCamelCase )
# Test that the tokenization is identical on unicode of space type
UpperCAmelCase_ : int = [
'\u0009', # (horizontal tab, '\t')
'\u000B', # (vertical tab)
'\u000C', # (form feed)
'\u0020', # (space, ' ')
            '\u200E', # (left-to-right mark)
'\u200F', # (right-to-left mark)
]
for unicode_seq in spaces_unicodes:
UpperCAmelCase_ : Optional[Any] = tokenizer_s.tokenize(_UpperCamelCase )
UpperCAmelCase_ : Union[str, Any] = tokenizer_r.tokenize(_UpperCamelCase )
self.assertListEqual(_UpperCamelCase , _UpperCamelCase )
# Test that the tokenization is identical on unicode of line break type
UpperCAmelCase_ : str = [
'\u000A', # (line feed, '\n')
'\r\n', # (carriage return and line feed, '\r\n')
'\u000D', # (carriage return, '\r')
'\r', # (carriage return, '\r')
'\u000D', # (carriage return, '\r')
'\u2028', # (line separator)
'\u2029', # (paragraph separator)
# "\u0085", # (next line)
]
# The tokenization is not identical for the character "\u0085" (next line). The slow version using ftfy transforms
# it into the Horizontal Ellipsis character "…" ("\u2026") while the fast version transforms it into a
# space (and thus into an empty list).
for unicode_seq in line_break_unicodes:
UpperCAmelCase_ : Any = tokenizer_s.tokenize(_UpperCamelCase )
UpperCAmelCase_ : int = tokenizer_r.tokenize(_UpperCamelCase )
self.assertListEqual(_UpperCamelCase , _UpperCamelCase )
def __UpperCAmelCase ( self ) -> Dict:
        # Verify that the returned offset mappings correctly reflect the `add_prefix_space` argument
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})" ):
UpperCAmelCase_ : str = 'hello' # `hello` is a token in the vocabulary of `pretrained_name`
UpperCAmelCase_ : Optional[int] = f"{text_of_1_token} {text_of_1_token}"
UpperCAmelCase_ : Union[str, Any] = self.rust_tokenizer_class.from_pretrained(
_UpperCamelCase , use_fast=_UpperCamelCase , )
UpperCAmelCase_ : int = tokenizer_r(_UpperCamelCase , return_offsets_mapping=_UpperCamelCase , add_special_tokens=_UpperCamelCase )
self.assertEqual(encoding.offset_mapping[0] , (0, len(_UpperCamelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(_UpperCamelCase ) + 1, len(_UpperCamelCase ) + 1 + len(_UpperCamelCase )) , )
UpperCAmelCase_ : Optional[int] = f" {text}"
UpperCAmelCase_ : Tuple = self.rust_tokenizer_class.from_pretrained(
_UpperCamelCase , use_fast=_UpperCamelCase , )
UpperCAmelCase_ : int = tokenizer_r(_UpperCamelCase , return_offsets_mapping=_UpperCamelCase , add_special_tokens=_UpperCamelCase )
self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(_UpperCamelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(_UpperCamelCase ) + 1, 1 + len(_UpperCamelCase ) + 1 + len(_UpperCamelCase )) , )
def __UpperCAmelCase ( self ) -> Any:
        # Test related to the breaking change introduced in transformers v4.17.0
        # We need to check that an error is raised when the user tries to load a previous version of the tokenizer.
with self.assertRaises(_UpperCamelCase ) as context:
self.rust_tokenizer_class.from_pretrained('robot-test/old-clip-tokenizer' )
self.assertTrue(
context.exception.args[0].startswith(
'The `backend_tokenizer` provided does not match the expected format.' ) )
@require_ftfy
def __UpperCAmelCase ( self ) -> Optional[Any]:
super().test_tokenization_python_rust_equals()
def __UpperCAmelCase ( self ) -> List[Any]:
# CLIP always lower cases letters
pass
| 29
|
def cocktail_shaker_sort(unsorted: list) -> list:
    """
    Sort a list in place with alternating backward and forward bubble passes.

    >>> cocktail_shaker_sort([4, 5, 2, 1, 2])
    [1, 2, 2, 4, 5]
    >>> cocktail_shaker_sort([-4, 5, 0, 1, 2, 11])
    [-4, 0, 1, 2, 5, 11]
    """
    for i in range(len(unsorted) - 1, 0, -1):
        swapped = False

        # backward pass: bubble the smallest remaining value to the front
        for j in range(i, 0, -1):
            if unsorted[j] < unsorted[j - 1]:
                unsorted[j], unsorted[j - 1] = unsorted[j - 1], unsorted[j]
                swapped = True

        # forward pass: bubble the largest remaining value towards the back
        for j in range(i):
            if unsorted[j] > unsorted[j + 1]:
                unsorted[j], unsorted[j + 1] = unsorted[j + 1], unsorted[j]
                swapped = True

        if not swapped:
            break
    return unsorted


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    user_input = input('Enter numbers separated by a comma:\n').strip()
    unsorted = [int(item) for item in user_input.split(',')]
    print(f'{cocktail_shaker_sort(unsorted) = }')
| 29
| 1
|
"""simple docstring"""
from math import ceil
def lowerCAmelCase (__UpperCamelCase : List[str] , __UpperCamelCase : str ):
"""simple docstring"""
__UpperCamelCase =list(range(0 , snake_case_ ) )
__UpperCamelCase =[item for sublist in list(device_map.values() ) for item in sublist]
# Duplicate check
__UpperCamelCase =[]
for i in device_map_blocks:
if device_map_blocks.count(snake_case_ ) > 1 and i not in duplicate_blocks:
duplicate_blocks.append(snake_case_ )
# Missing blocks
__UpperCamelCase =[i for i in blocks if i not in device_map_blocks]
__UpperCamelCase =[i for i in device_map_blocks if i not in blocks]
if len(snake_case_ ) != 0:
raise ValueError(
'''Duplicate attention blocks specified in device_map. Attention blocks must be specified to one device.'''
''' These attention blocks were specified more than once: ''' + str(snake_case_ ) )
if len(snake_case_ ) != 0:
raise ValueError(
'''There are attention blocks for this model that are not specified in the device_map. Add these attention '''
'''blocks to a device on the device_map: ''' + str(snake_case_ ) )
if len(snake_case_ ) != 0:
raise ValueError(
'''The device_map contains more attention blocks than this model has. Remove these from the device_map:'''
+ str(snake_case_ ) )
def lowerCAmelCase (__UpperCamelCase : Optional[int] , __UpperCamelCase : Optional[Any] ):
"""simple docstring"""
__UpperCamelCase =list(range(snake_case_ ) )
__UpperCamelCase =int(ceil(n_layers / len(snake_case_ ) ) )
__UpperCamelCase =[layers[i : i + n_blocks] for i in range(0 , snake_case_ , snake_case_ )]
return dict(zip(snake_case_ , snake_case_ ) )
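# For instance, get_device_map(12, [0, 1, 2, 3]) evaluates to
# {0: [0, 1, 2], 1: [3, 4, 5], 2: [6, 7, 8], 3: [9, 10, 11]}.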
| 355
|
"""simple docstring"""
import argparse
import logging
import os
import datasets
import tensorflow as tf
from transformers import AutoTokenizer
__lowercase = logging.getLogger(__name__)
def parse_args():
    """simple docstring"""
    parser = argparse.ArgumentParser(
        description='''Prepare TFRecord shards from pre-tokenized samples of the wikitext dataset.''' )
    parser.add_argument(
        '''--dataset_name''' , type=str , default='''wikitext''' , help='''Name of the training dataset. Explore datasets at: hf.co/datasets.''' , )
    parser.add_argument(
        '''--dataset_config''' , type=str , default='''wikitext-103-raw-v1''' , help='''Configuration name of the dataset.''' )
    parser.add_argument(
        '''--tokenizer_name_or_path''' , type=str , default='''sayakpaul/unigram-tokenizer-wikitext''' , help='''Tokenizer identifier. Can be a local filepath or a Hub identifier.''' , )
    parser.add_argument(
        '''--shard_size''' , type=int , default=1000 , help='''Number of entries to go in a single shard.''' , )
    parser.add_argument('''--split''' , type=str , default='''train''' , choices=['''train''', '''test''', '''validation'''] )
    parser.add_argument(
        '''--limit''' , default=None , type=int , help='''Limit the number of shards (used for debugging).''' , )
    parser.add_argument(
        '''--max_length''' , type=int , default=512 , help='''Maximum sequence length. For training on TPUs, it helps to have a maximum'''
        ''' sequence length that is a multiple of 8.''' , )
    parser.add_argument(
        '''--output_dir''' , default='''tf-tpu''' , type=str , help='''Output directory where the TFRecord shards will be saved. If the'''
        ''' path is appended with `gs://` (\'gs://tf-tpu\', for example) then the TFRecord'''
        ''' shards will be directly saved to a Google Cloud Storage bucket.''' , )
    args = parser.parse_args()
    return args
def tokenize_function(tokenizer):
    """Return a closure that tokenizes the ``text`` column of a batch of examples, for use with datasets.map()."""
    def fn(examples):
        return tokenizer(examples['''text'''])

    return fn
def get_serialized_examples(tokenized_data):
    """Serialize each tokenized sample into one tf.train.Example record."""
    records = []
    for i in range(len(tokenized_data['''input_ids'''])):
        features = {
            '''input_ids''': tf.train.Feature(int64_list=tf.train.Int64List(value=tokenized_data['''input_ids'''][i])),
            '''attention_mask''': tf.train.Feature(
                int64_list=tf.train.Int64List(value=tokenized_data['''attention_mask'''][i])),
        }
        tf_features = tf.train.Features(feature=features)
        tf_example = tf.train.Example(features=tf_features)
        serialized_example = tf_example.SerializeToString()
        records.append(serialized_example)
    return records
def main(args):
    """simple docstring"""
    dataset = datasets.load_dataset(args.dataset_name , args.dataset_config , split=args.split )

    if args.limit is not None:
        max_samples = min(len(dataset) , args.limit )
        dataset = dataset.select(range(max_samples) )
        print(f"""Limiting the dataset to {args.limit} entries.""" )

    tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name_or_path )

    # Handle output directory creation.
    # For serializing into a Google Cloud Storage Bucket, one needs to first
    # create a bucket.
    if "gs" not in args.output_dir:
        if not os.path.exists(args.output_dir ):
            os.makedirs(args.output_dir )
        split_dir = os.path.join(args.output_dir , args.split )
        if not os.path.exists(split_dir ):
            os.makedirs(split_dir )
    else:
        split_dir = os.path.join(args.output_dir , args.split )

    # Tokenize the whole dataset at once.
    tokenize_fn = tokenize_function(tokenizer )
    dataset_tokenized = dataset.map(tokenize_fn , batched=True , num_proc=4 , remove_columns=['''text'''] )

    # We need to concatenate all our texts together, and then split the result
    # into chunks of a fixed size, which we will call block_size. To do this, we
    # will use the map method again, with the option batched=True. When we use batched=True,
    # the function we pass to map() will be passed multiple inputs at once, allowing us
    # to group them into more or fewer examples than we had in the input.
    # This allows us to create our new fixed-length samples. The advantage of this
    # method is that we don't lose a whole lot of content from the dataset compared to the
    # case where we simply tokenize with a pre-defined max_length.

    def group_texts(examples ):
        # Concatenate all texts.
        concatenated_examples = {k: sum(examples[k] , [] ) for k in examples.keys()}
        total_length = len(concatenated_examples[list(examples.keys() )[0]] )
        # We drop the small remainder, though you could add padding instead if the model supports it
        # In this, as in all things, we advise you to follow your heart 🫀
        total_length = (total_length // args.max_length) * args.max_length
        # Split by chunks of max_len.
        result = {
            k: [t[i : i + args.max_length] for i in range(0 , total_length , args.max_length )]
            for k, t in concatenated_examples.items()
        }
        return result

    grouped_dataset = dataset_tokenized.map(group_texts , batched=True , batch_size=1000 , num_proc=4 )

    shard_count = 0
    total_records = 0
    for shard in range(0 , len(grouped_dataset ) , args.shard_size ):
        dataset_snapshot = grouped_dataset[shard : shard + args.shard_size]
        records_containing = len(dataset_snapshot['''input_ids'''] )
        filename = os.path.join(split_dir , f"""dataset-{shard_count}-{records_containing}.tfrecord""" )
        serialized_examples = get_serialized_examples(dataset_snapshot )

        with tf.io.TFRecordWriter(filename ) as out_file:
            for i in range(len(serialized_examples ) ):
                example = serialized_examples[i]
                out_file.write(example )
            print('''Wrote file {} containing {} records'''.format(filename , records_containing ) )

        shard_count += 1
        total_records += records_containing

    with open(f"""split-{args.split}-records-count.txt""" , '''w''' ) as f:
        print(f"""Total {args.split} records: {total_records}""" , file=f )
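# Note on group_texts above: with max_length=512, two tokenized samples of 300 and 400
# tokens are concatenated into 700 tokens and regrouped into one 512-token sample;
# the 188-token remainder is dropped.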
if __name__ == "__main__":
__lowercase = parse_args()
main(args)
| 85
| 0
|
import json
import os
import shutil
import tempfile
from unittest import TestCase
from transformers import BartTokenizer, BartTokenizerFast, DPRQuestionEncoderTokenizer, DPRQuestionEncoderTokenizerFast
from transformers.models.bart.configuration_bart import BartConfig
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES
from transformers.models.dpr.configuration_dpr import DPRConfig
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES
from transformers.testing_utils import require_faiss, require_tokenizers, require_torch, slow
from transformers.utils import is_datasets_available, is_faiss_available, is_torch_available
if is_torch_available() and is_datasets_available() and is_faiss_available():
from transformers.models.rag.configuration_rag import RagConfig
from transformers.models.rag.tokenization_rag import RagTokenizer
@require_faiss
@require_torch
class _A ( __UpperCAmelCase ):
def _lowerCamelCase ( self : Dict):
'''simple docstring'''
__a = tempfile.mkdtemp()
__a = 8
# DPR tok
__a = [
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''[PAD]''',
'''[MASK]''',
'''want''',
'''##want''',
'''##ed''',
'''wa''',
'''un''',
'''runn''',
'''##ing''',
''',''',
'''low''',
'''lowest''',
]
__a = os.path.join(self.tmpdirname , '''dpr_tokenizer''')
os.makedirs(__SCREAMING_SNAKE_CASE , exist_ok=__SCREAMING_SNAKE_CASE)
__a = os.path.join(__SCREAMING_SNAKE_CASE , DPR_VOCAB_FILES_NAMES['''vocab_file'''])
with open(self.vocab_file , '''w''' , encoding='''utf-8''') as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens]))
# BART tok
__a = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''\u0120''',
'''\u0120l''',
'''\u0120n''',
'''\u0120lo''',
'''\u0120low''',
'''er''',
'''\u0120lowest''',
'''\u0120newer''',
'''\u0120wider''',
'''<unk>''',
]
__a = dict(zip(__SCREAMING_SNAKE_CASE , range(len(__SCREAMING_SNAKE_CASE))))
__a = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', '''''']
__a = {'''unk_token''': '''<unk>'''}
__a = os.path.join(self.tmpdirname , '''bart_tokenizer''')
os.makedirs(__SCREAMING_SNAKE_CASE , exist_ok=__SCREAMING_SNAKE_CASE)
__a = os.path.join(__SCREAMING_SNAKE_CASE , BART_VOCAB_FILES_NAMES['''vocab_file'''])
__a = os.path.join(__SCREAMING_SNAKE_CASE , BART_VOCAB_FILES_NAMES['''merges_file'''])
with open(self.vocab_file , '''w''' , encoding='''utf-8''') as fp:
fp.write(json.dumps(__SCREAMING_SNAKE_CASE) + '''\n''')
with open(self.merges_file , '''w''' , encoding='''utf-8''') as fp:
fp.write('''\n'''.join(__SCREAMING_SNAKE_CASE))
def _lowerCamelCase ( self : List[Any]):
'''simple docstring'''
return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , '''dpr_tokenizer'''))
def _lowerCamelCase ( self : Optional[int]):
'''simple docstring'''
return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname , '''bart_tokenizer'''))
def _lowerCamelCase ( self : List[Any]):
'''simple docstring'''
shutil.rmtree(self.tmpdirname)
@require_tokenizers
def _lowerCamelCase ( self : List[Any]):
'''simple docstring'''
__a = os.path.join(self.tmpdirname , '''rag_tokenizer''')
__a = RagConfig(question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict())
__a = RagTokenizer(question_encoder=self.get_dpr_tokenizer() , generator=self.get_bart_tokenizer())
rag_config.save_pretrained(__SCREAMING_SNAKE_CASE)
rag_tokenizer.save_pretrained(__SCREAMING_SNAKE_CASE)
__a = RagTokenizer.from_pretrained(__SCREAMING_SNAKE_CASE , config=__SCREAMING_SNAKE_CASE)
self.assertIsInstance(new_rag_tokenizer.question_encoder , __SCREAMING_SNAKE_CASE)
self.assertEqual(new_rag_tokenizer.question_encoder.get_vocab() , rag_tokenizer.question_encoder.get_vocab())
self.assertIsInstance(new_rag_tokenizer.generator , __SCREAMING_SNAKE_CASE)
self.assertEqual(new_rag_tokenizer.generator.get_vocab() , rag_tokenizer.generator.get_vocab())
@slow
def _lowerCamelCase ( self : Optional[int]):
'''simple docstring'''
__a = RagTokenizer.from_pretrained('''facebook/rag-token-nq''')
__a = [
'''who got the first nobel prize in physics''',
'''when is the next deadpool movie being released''',
'''which mode is used for short wave broadcast service''',
'''who is the owner of reading football club''',
'''when is the next scandal episode coming out''',
'''when is the last time the philadelphia won the superbowl''',
'''what is the most current adobe flash player version''',
'''how many episodes are there in dragon ball z''',
'''what is the first step in the evolution of the eye''',
'''where is gall bladder situated in human body''',
'''what is the main mineral in lithium batteries''',
'''who is the president of usa right now''',
'''where do the greasers live in the outsiders''',
'''panda is a national animal of which country''',
'''what is the name of manchester united stadium''',
]
__a = tokenizer(__SCREAMING_SNAKE_CASE)
self.assertIsNotNone(__SCREAMING_SNAKE_CASE)
@slow
def _lowerCamelCase ( self : List[Any]):
'''simple docstring'''
__a = RagTokenizer.from_pretrained('''facebook/rag-sequence-nq''')
__a = [
'''who got the first nobel prize in physics''',
'''when is the next deadpool movie being released''',
'''which mode is used for short wave broadcast service''',
'''who is the owner of reading football club''',
'''when is the next scandal episode coming out''',
'''when is the last time the philadelphia won the superbowl''',
'''what is the most current adobe flash player version''',
'''how many episodes are there in dragon ball z''',
'''what is the first step in the evolution of the eye''',
'''where is gall bladder situated in human body''',
'''what is the main mineral in lithium batteries''',
'''who is the president of usa right now''',
'''where do the greasers live in the outsiders''',
'''panda is a national animal of which country''',
'''what is the name of manchester united stadium''',
]
__a = tokenizer(__SCREAMING_SNAKE_CASE)
self.assertIsNotNone(__SCREAMING_SNAKE_CASE)
| 49
|
import collections
import inspect
import unittest
from transformers import FocalNetConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
)
from transformers.models.focalnet.modeling_focalnet import FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class __lowerCAmelCase :
"""simple docstring"""
def __init__( self , lowerCamelCase__ , lowerCamelCase__=13 , lowerCamelCase__=32 , lowerCamelCase__=2 , lowerCamelCase__=3 , lowerCamelCase__=16 , lowerCamelCase__=[32, 64, 128] , lowerCamelCase__=[1, 2, 1] , lowerCamelCase__=[2, 2, 4] , lowerCamelCase__=2 , lowerCamelCase__=2.0 , lowerCamelCase__=True , lowerCamelCase__=0.0 , lowerCamelCase__=0.0 , lowerCamelCase__=0.1 , lowerCamelCase__="gelu" , lowerCamelCase__=False , lowerCamelCase__=True , lowerCamelCase__=0.02 , lowerCamelCase__=1e-5 , lowerCamelCase__=True , lowerCamelCase__=None , lowerCamelCase__=True , lowerCamelCase__=10 , lowerCamelCase__=8 , lowerCamelCase__=["stage1", "stage2"] , lowerCamelCase__=[1, 2] , ) -> int:
'''simple docstring'''
__lowerCamelCase = parent
__lowerCamelCase = batch_size
__lowerCamelCase = image_size
__lowerCamelCase = patch_size
__lowerCamelCase = num_channels
__lowerCamelCase = embed_dim
__lowerCamelCase = hidden_sizes
__lowerCamelCase = depths
__lowerCamelCase = num_heads
__lowerCamelCase = window_size
__lowerCamelCase = mlp_ratio
__lowerCamelCase = qkv_bias
__lowerCamelCase = hidden_dropout_prob
__lowerCamelCase = attention_probs_dropout_prob
__lowerCamelCase = drop_path_rate
__lowerCamelCase = hidden_act
__lowerCamelCase = use_absolute_embeddings
__lowerCamelCase = patch_norm
__lowerCamelCase = layer_norm_eps
__lowerCamelCase = initializer_range
__lowerCamelCase = is_training
__lowerCamelCase = scope
__lowerCamelCase = use_labels
__lowerCamelCase = type_sequence_label_size
__lowerCamelCase = encoder_stride
__lowerCamelCase = out_features
__lowerCamelCase = out_indices
def lowercase_ ( self ) -> List[Any]:
'''simple docstring'''
__lowerCamelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__lowerCamelCase = None
if self.use_labels:
__lowerCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__lowerCamelCase = self.get_config()
return config, pixel_values, labels
def lowercase_ ( self ) -> List[str]:
'''simple docstring'''
return FocalNetConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , hidden_sizes=self.hidden_sizes , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , out_features=self.out_features , out_indices=self.out_indices , )
def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> int:
'''simple docstring'''
__lowerCamelCase = FocalNetModel(config=lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
__lowerCamelCase = model(lowerCamelCase__ )
__lowerCamelCase = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
__lowerCamelCase = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
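        # Each FocalNet stage after the first halves both spatial dimensions (shrinking the sequence
        # length by a factor of 4 per stage) and doubles the channel width, which is what the two
        # formulas above compute.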
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) )
def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> List[Any]:
'''simple docstring'''
__lowerCamelCase = FocalNetBackbone(config=lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
__lowerCamelCase = model(lowerCamelCase__ )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.image_size, 8, 8] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , config.hidden_sizes[:-1] )
# verify backbone works with out_features=None
__lowerCamelCase = None
__lowerCamelCase = FocalNetBackbone(config=lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
__lowerCamelCase = model(lowerCamelCase__ )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.image_size * 2, 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) , 1 )
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> int:
'''simple docstring'''
__lowerCamelCase = FocalNetForMaskedImageModeling(config=lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
__lowerCamelCase = model(lowerCamelCase__ )
self.parent.assertEqual(
result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
__lowerCamelCase = 1
__lowerCamelCase = FocalNetForMaskedImageModeling(lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
__lowerCamelCase = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
__lowerCamelCase = model(lowerCamelCase__ )
self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) )
def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> str:
'''simple docstring'''
__lowerCamelCase = self.type_sequence_label_size
__lowerCamelCase = FocalNetForImageClassification(lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
__lowerCamelCase = model(lowerCamelCase__ , labels=lowerCamelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
__lowerCamelCase = 1
__lowerCamelCase = FocalNetForImageClassification(lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
__lowerCamelCase = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
__lowerCamelCase = model(lowerCamelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def lowercase_ ( self ) -> Tuple:
'''simple docstring'''
__lowerCamelCase = self.prepare_config_and_inputs()
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase = config_and_inputs
__lowerCamelCase = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class __lowerCAmelCase ( __magic_name__ , __magic_name__ , unittest.TestCase ):
"""simple docstring"""
snake_case_ = (
(
FocalNetModel,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetBackbone,
)
if is_torch_available()
else ()
)
snake_case_ = (
{'''feature-extraction''': FocalNetModel, '''image-classification''': FocalNetForImageClassification}
if is_torch_available()
else {}
)
snake_case_ = False
snake_case_ = False
snake_case_ = False
snake_case_ = False
snake_case_ = False
def lowercase_ ( self ) -> str:
'''simple docstring'''
__lowerCamelCase = FocalNetModelTester(self )
__lowerCamelCase = ConfigTester(self , config_class=lowerCamelCase__ , embed_dim=37 , has_text_modality=lowerCamelCase__ )
def lowercase_ ( self ) -> str:
'''simple docstring'''
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def lowercase_ ( self ) -> str:
'''simple docstring'''
return
def lowercase_ ( self ) -> List[Any]:
'''simple docstring'''
__lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCamelCase__ )
def lowercase_ ( self ) -> Optional[Any]:
'''simple docstring'''
__lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*lowerCamelCase__ )
def lowercase_ ( self ) -> str:
'''simple docstring'''
__lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*lowerCamelCase__ )
def lowercase_ ( self ) -> Any:
'''simple docstring'''
__lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowerCamelCase__ )
@unittest.skip(reason='FocalNet does not use inputs_embeds' )
def lowercase_ ( self ) -> Optional[int]:
'''simple docstring'''
pass
@unittest.skip(reason='FocalNet does not use feedforward chunking' )
def lowercase_ ( self ) -> Union[str, Any]:
'''simple docstring'''
pass
def lowercase_ ( self ) -> Dict:
'''simple docstring'''
__lowerCamelCase , __lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes[:-1]:
__lowerCamelCase = model_class(lowerCamelCase__ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
__lowerCamelCase = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(lowerCamelCase__ , nn.Linear ) )
def lowercase_ ( self ) -> str:
'''simple docstring'''
__lowerCamelCase , __lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes[:-1]:
__lowerCamelCase = model_class(lowerCamelCase__ )
__lowerCamelCase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__lowerCamelCase = [*signature.parameters.keys()]
__lowerCamelCase = ['pixel_values']
self.assertListEqual(arg_names[:1] , lowerCamelCase__ )
def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> int:
'''simple docstring'''
__lowerCamelCase = model_class(lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
with torch.no_grad():
__lowerCamelCase = model(**self._prepare_for_class(lowerCamelCase__ , lowerCamelCase__ ) )
__lowerCamelCase = outputs.hidden_states
__lowerCamelCase = getattr(
self.model_tester , 'expected_num_hidden_layers' , len(self.model_tester.depths ) + 1 )
self.assertEqual(len(lowerCamelCase__ ) , lowerCamelCase__ )
# FocalNet has a different seq_length
__lowerCamelCase = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
__lowerCamelCase = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
__lowerCamelCase = outputs.reshaped_hidden_states
self.assertEqual(len(lowerCamelCase__ ) , lowerCamelCase__ )
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase = reshaped_hidden_states[0].shape
__lowerCamelCase = (
reshaped_hidden_states[0].view(lowerCamelCase__ , lowerCamelCase__ , height * width ).permute(0 , 2 , 1 )
)
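        # view(batch, channels, height * width) followed by permute(0, 2, 1) recovers the
        # (batch, seq_len, hidden_dim) layout of the regular hidden states, so the same shape check applies.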
self.assertListEqual(
list(reshaped_hidden_states.shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
def lowercase_ ( self ) -> Dict:
'''simple docstring'''
__lowerCamelCase , __lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
__lowerCamelCase = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes[:-1]:
__lowerCamelCase = True
self.check_hidden_states_output(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__lowerCamelCase = True
self.check_hidden_states_output(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
def lowercase_ ( self ) -> Optional[Any]:
'''simple docstring'''
__lowerCamelCase , __lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
__lowerCamelCase = 3
__lowerCamelCase = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
__lowerCamelCase = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
__lowerCamelCase = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
__lowerCamelCase = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
for model_class in self.all_model_classes[:-1]:
__lowerCamelCase = True
self.check_hidden_states_output(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , (padded_height, padded_width) )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__lowerCamelCase = True
self.check_hidden_states_output(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , (padded_height, padded_width) )
@slow
def lowercase_ ( self ) -> str:
'''simple docstring'''
for model_name in FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowerCamelCase = FocalNetModel.from_pretrained(lowerCamelCase__ )
self.assertIsNotNone(lowerCamelCase__ )
def lowercase_ ( self ) -> Optional[int]:
'''simple docstring'''
__lowerCamelCase , __lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
__lowerCamelCase = _config_zero_init(lowerCamelCase__ )
for model_class in self.all_model_classes:
__lowerCamelCase = model_class(config=lowerCamelCase__ )
for name, param in model.named_parameters():
if "embeddings" not in name and param.requires_grad:
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=f"""Parameter {name} of model {model_class} seems not properly initialized""" , )
@require_vision
@require_torch
class __lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def lowercase_ ( self ) -> List[str]:
'''simple docstring'''
# TODO update organization
return AutoImageProcessor.from_pretrained('microsoft/focalnet-tiny' ) if is_vision_available() else None
@slow
def lowercase_ ( self ) -> List[Any]:
'''simple docstring'''
__lowerCamelCase = FocalNetForImageClassification.from_pretrained('microsoft/focalnet-tiny' ).to(lowerCamelCase__ )
__lowerCamelCase = self.default_image_processor
__lowerCamelCase = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
__lowerCamelCase = image_processor(images=lowerCamelCase__ , return_tensors='pt' ).to(lowerCamelCase__ )
# forward pass
with torch.no_grad():
__lowerCamelCase = model(**lowerCamelCase__ )
# verify the logits
__lowerCamelCase = torch.Size((1, 1_000) )
self.assertEqual(outputs.logits.shape , lowerCamelCase__ )
__lowerCamelCase = torch.tensor([0.21_66, -0.43_68, 0.21_91] ).to(lowerCamelCase__ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowerCamelCase__ , atol=1e-4 ) )
        self.assertEqual(outputs.logits.argmax(dim=-1 ).item() , 281 )
@require_torch
class __lowerCAmelCase ( __magic_name__ , unittest.TestCase ):
"""simple docstring"""
snake_case_ = (FocalNetBackbone,) if is_torch_available() else ()
snake_case_ = FocalNetConfig
snake_case_ = False
def lowercase_ ( self ) -> Any:
'''simple docstring'''
__lowerCamelCase = FocalNetModelTester(self )
| 90
| 0
|
'''Minimum cost to travel on every listed day, given 1-day, 7-day and 30-day pass prices.'''
import functools


def mincost_tickets(days: list, costs: list) -> int:
    """
    >>> mincost_tickets([1, 4, 6, 7, 8, 20], [2, 7, 15])
    11

    >>> mincost_tickets([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 30, 31], [2, 7, 15])
    17
    """
    # Validation
    if not isinstance(days, list) or not all(isinstance(day, int) for day in days):
        raise ValueError("""The parameter days should be a list of integers""")

    if len(costs) != 3 or not all(isinstance(cost, int) for cost in costs):
        raise ValueError("""The parameter costs should be a list of three integers""")

    if len(days) == 0:
        return 0

    if min(days) <= 0:
        raise ValueError("""All days elements should be greater than 0""")

    if max(days) >= 366:
        raise ValueError("""All days elements should be less than 366""")

    days_set = set(days)

    @functools.cache
    def dynamic_programming(index: int) -> int:
        if index > 365:
            return 0

        if index not in days_set:
            return dynamic_programming(index + 1)

        return min(
            costs[0] + dynamic_programming(index + 1),
            costs[1] + dynamic_programming(index + 7),
            costs[2] + dynamic_programming(index + 30),
        )

    return dynamic_programming(1)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 183
|
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ..models.auto import AutoModelForVisionaSeq
from ..utils import requires_backends
from .base import PipelineTool
if TYPE_CHECKING:
from PIL import Image
class a_ ( lowerCamelCase ):
lowercase = """Salesforce/blip-image-captioning-base"""
lowercase = (
"""This is a tool that generates a description of an image. It takes an input named `image` which should be the """
"""image to caption, and returns a text that contains the description in English."""
)
lowercase = """image_captioner"""
lowercase = AutoModelForVisionaSeq
lowercase = ["""image"""]
lowercase = ["""text"""]
def __init__( self , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) -> Any:
"""simple docstring"""
requires_backends(self , ["""vision"""] )
super().__init__(*_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
def A__ ( self , _SCREAMING_SNAKE_CASE ) -> str:
"""simple docstring"""
return self.pre_processor(images=_SCREAMING_SNAKE_CASE , return_tensors="""pt""" )
def A__ ( self , _SCREAMING_SNAKE_CASE ) -> str:
"""simple docstring"""
return self.model.generate(**_SCREAMING_SNAKE_CASE )
def A__ ( self , _SCREAMING_SNAKE_CASE ) -> Union[str, Any]:
"""simple docstring"""
return self.pre_processor.batch_decode(_SCREAMING_SNAKE_CASE , skip_special_tokens=_SCREAMING_SNAKE_CASE )[0].strip()
| 183
| 1
|
import warnings
from ...utils import logging
from .image_processing_clip import CLIPImageProcessor
UpperCAmelCase_ : Any = logging.get_logger(__name__)
class _SCREAMING_SNAKE_CASE ( _a ):
    def __init__( self , *args , **kwargs ):
        warnings.warn(
            """The class CLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"""
            """ use CLIPImageProcessor instead.""" , FutureWarning , )
        super().__init__(*args , **kwargs )
| 38
|
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPImageProcessor, CLIPVisionConfig, CLIPVisionModel
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEImgaImgPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import floats_tensor, load_image, load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class _SCREAMING_SNAKE_CASE ( _a , unittest.TestCase ):
snake_case__ : Tuple = ShapEImgaImgPipeline
snake_case__ : Optional[Any] = ["""image"""]
snake_case__ : Union[str, Any] = ["""image"""]
snake_case__ : Optional[Any] = [
"""num_images_per_prompt""",
"""num_inference_steps""",
"""generator""",
"""latents""",
"""guidance_scale""",
"""frame_size""",
"""output_type""",
"""return_dict""",
]
snake_case__ : List[str] = False
@property
def _A ( self : Any ):
return 32
@property
def _A ( self : Any ):
return 32
@property
def _A ( self : Optional[Any] ):
return self.time_input_dim * 4
@property
def _A ( self : Union[str, Any] ):
return 8
@property
def _A ( self : int ):
torch.manual_seed(0 )
UpperCamelCase :Union[str, Any] = CLIPVisionConfig(
hidden_size=self.text_embedder_hidden_size , image_size=64 , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_channels=3 , num_hidden_layers=5 , patch_size=1 , )
UpperCamelCase :Optional[int] = CLIPVisionModel(__lowerCamelCase )
return model
@property
def _A ( self : str ):
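        # The image_mean and image_std values below are the standard CLIP normalization statistics.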
UpperCamelCase :Optional[int] = CLIPImageProcessor(
crop_size=224 , do_center_crop=__lowerCamelCase , do_normalize=__lowerCamelCase , do_resize=__lowerCamelCase , image_mean=[0.48145466, 0.4578275, 0.40821073] , image_std=[0.26862954, 0.26130258, 0.27577711] , resample=3 , size=224 , )
return image_processor
@property
def _A ( self : Tuple ):
torch.manual_seed(0 )
UpperCamelCase :Dict = {
"""num_attention_heads""": 2,
"""attention_head_dim""": 16,
"""embedding_dim""": self.time_input_dim,
"""num_embeddings""": 32,
"""embedding_proj_dim""": self.text_embedder_hidden_size,
"""time_embed_dim""": self.time_embed_dim,
"""num_layers""": 1,
"""clip_embed_dim""": self.time_input_dim * 2,
"""additional_embeddings""": 0,
"""time_embed_act_fn""": """gelu""",
"""norm_in_type""": """layer""",
"""embedding_proj_norm_type""": """layer""",
"""encoder_hid_proj_type""": None,
"""added_emb_type""": None,
}
UpperCamelCase :int = PriorTransformer(**__lowerCamelCase )
return model
@property
def _A ( self : Optional[int] ):
torch.manual_seed(0 )
UpperCamelCase :str = {
"""param_shapes""": (
(self.renderer_dim, 93),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
),
"""d_latent""": self.time_input_dim,
"""d_hidden""": self.renderer_dim,
"""n_output""": 12,
"""background""": (
0.1,
0.1,
0.1,
),
}
UpperCamelCase :List[str] = ShapERenderer(**__lowerCamelCase )
return model
def _A ( self : str ):
UpperCamelCase :int = self.dummy_prior
UpperCamelCase :Any = self.dummy_image_encoder
UpperCamelCase :Dict = self.dummy_image_processor
UpperCamelCase :List[Any] = self.dummy_renderer
UpperCamelCase :int = HeunDiscreteScheduler(
beta_schedule="""exp""" , num_train_timesteps=1_024 , prediction_type="""sample""" , use_karras_sigmas=__lowerCamelCase , clip_sample=__lowerCamelCase , clip_sample_range=1.0 , )
UpperCamelCase :Optional[Any] = {
"""prior""": prior,
"""image_encoder""": image_encoder,
"""image_processor""": image_processor,
"""renderer""": renderer,
"""scheduler""": scheduler,
}
return components
def _A ( self : int , __lowerCamelCase : int , __lowerCamelCase : Any=0 ):
UpperCamelCase :Any = floats_tensor((1, 3, 64, 64) , rng=random.Random(__lowerCamelCase ) ).to(__lowerCamelCase )
if str(__lowerCamelCase ).startswith("""mps""" ):
UpperCamelCase :List[Any] = torch.manual_seed(__lowerCamelCase )
else:
UpperCamelCase :Optional[int] = torch.Generator(device=__lowerCamelCase ).manual_seed(__lowerCamelCase )
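        # mps does not support device-bound torch.Generator objects, so a global CPU seed is used there instead.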
UpperCamelCase :Optional[Any] = {
"""image""": input_image,
"""generator""": generator,
"""num_inference_steps""": 1,
"""frame_size""": 32,
"""output_type""": """np""",
}
return inputs
def _A ( self : List[str] ):
UpperCamelCase :Dict = """cpu"""
UpperCamelCase :List[Any] = self.get_dummy_components()
UpperCamelCase :Optional[int] = self.pipeline_class(**__lowerCamelCase )
UpperCamelCase :int = pipe.to(__lowerCamelCase )
pipe.set_progress_bar_config(disable=__lowerCamelCase )
UpperCamelCase :Optional[Any] = pipe(**self.get_dummy_inputs(__lowerCamelCase ) )
UpperCamelCase :Dict = output.images[0]
UpperCamelCase :List[Any] = image[0, -3:, -3:, -1]
assert image.shape == (20, 32, 32, 3)
UpperCamelCase :Dict = np.array(
[
0.00039216,
0.00039216,
0.00039216,
0.00039216,
0.00039216,
0.00039216,
0.00039216,
0.00039216,
0.00039216,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def _A ( self : List[Any] ):
# NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches
self._test_inference_batch_consistent(batch_sizes=[1, 2] )
def _A ( self : List[Any] ):
UpperCamelCase :str = torch_device == """cpu"""
UpperCamelCase :int = True
self._test_inference_batch_single_identical(
batch_size=2 , test_max_difference=__lowerCamelCase , relax_max_difference=__lowerCamelCase , )
def _A ( self : List[Any] ):
UpperCamelCase :List[Any] = self.get_dummy_components()
UpperCamelCase :Optional[int] = self.pipeline_class(**__lowerCamelCase )
UpperCamelCase :List[Any] = pipe.to(__lowerCamelCase )
pipe.set_progress_bar_config(disable=__lowerCamelCase )
UpperCamelCase :Any = 1
UpperCamelCase :int = 2
UpperCamelCase :Union[str, Any] = self.get_dummy_inputs(__lowerCamelCase )
for key in inputs.keys():
if key in self.batch_params:
UpperCamelCase :str = batch_size * [inputs[key]]
UpperCamelCase :Optional[int] = pipe(**__lowerCamelCase , num_images_per_prompt=__lowerCamelCase )[0]
assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class ShapEImgaImgPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_shap_e_img2img(self):
        input_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/shap_e/corgi.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/shap_e/test_shap_e_img2img_out.npy"
        )
        pipe = ShapEImgaImgPipeline.from_pretrained("openai/shap-e-img2img")
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.Generator(device=torch_device).manual_seed(0)

        images = pipe(
            input_image,
            generator=generator,
            guidance_scale=3.0,
            num_inference_steps=64,
            frame_size=64,
            output_type="np",
        ).images[0]

        assert images.shape == (20, 64, 64, 3)

        assert_mean_pixel_difference(images, expected_image)
| 38
| 1
|
import colorsys
from PIL import Image # type: ignore
def get_distance(x: float, y: float, max_step: int) -> float:
    """Return the normalized escape step of the Mandelbrot iteration for c = x + y*i."""
    a = x
    b = y
    for step in range(max_step):  # noqa: B007
        a_new = a * a - b * b + x
        b = 2 * a * b + y
        a = a_new
        # divergence happens for all complex numbers with an absolute value
        # greater than 4
        if a * a + b * b > 4:
            break
    return step / (max_step - 1)


def get_black_and_white_rgb(distance: float) -> tuple:
    if distance == 1:
        return (0, 0, 0)
    else:
        return (255, 255, 255)


def get_color_coded_rgb(distance: float) -> tuple:
    if distance == 1:
        return (0, 0, 0)
    else:
        return tuple(round(i * 255) for i in colorsys.hsv_to_rgb(distance, 1, 1))


def get_image(
    image_width: int = 800,
    image_height: int = 600,
    figure_center_x: float = -0.6,
    figure_center_y: float = 0,
    figure_width: float = 3.2,
    max_step: int = 50,
    use_distance_color_coding: bool = True,
) -> Image.Image:
    img = Image.new("RGB", (image_width, image_height))
    pixels = img.load()

    # loop through the image-coordinates
    for image_x in range(image_width):
        for image_y in range(image_height):
            # determine the figure-coordinates based on the image-coordinates
            figure_height = figure_width / image_width * image_height
            figure_x = figure_center_x + (image_x / image_width - 0.5) * figure_width
            figure_y = figure_center_y + (image_y / image_height - 0.5) * figure_height

            distance = get_distance(figure_x, figure_y, max_step)

            # color the corresponding pixel based on the selected coloring-function
            if use_distance_color_coding:
                pixels[image_x, image_y] = get_color_coded_rgb(distance)
            else:
                pixels[image_x, image_y] = get_black_and_white_rgb(distance)

    return img


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    # colored version, full figure
    img = get_image()

    # uncomment for colored version, different section, zoomed in
    # img = get_image(figure_center_x = -0.6, figure_center_y = -0.4,
    # figure_width = 0.8)

    # uncomment for black and white version, full figure
    # img = get_image(use_distance_color_coding = False)

    # uncomment to save the image
    # img.save("mandelbrot.png")

    img.show()
| 371
|
def binary_multiply(a: int, b: int) -> int:
    """Multiply a by b using the Russian-peasant (binary) method."""
    res = 0
    while b > 0:
        if b & 1:
            res += a
        a += a
        b >>= 1
    return res


def binary_mod_multiply(a: int, b: int, modulus: int) -> int:
    """Multiply a by b modulo `modulus`, keeping intermediate results reduced."""
    res = 0
    while b > 0:
        if b & 1:
            res = ((res % modulus) + (a % modulus)) % modulus
        a += a
        b >>= 1
    return res
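

# A minimal usage sketch for the two helpers above; the example values are
# illustrative and not part of the original file.
if __name__ == "__main__":
    print(binary_multiply(3, 9))  # 3 * 9 == 27
    print(binary_mod_multiply(3, 9, 5))  # (3 * 9) % 5 == 2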
| 14
| 0
|
"""simple docstring"""
from __future__ import annotations
from typing import Dict
from ...configuration_utils import PretrainedConfig
ERNIE_M_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"susnato/ernie-m-base_pytorch": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/config.json",
"susnato/ernie-m-large_pytorch": "https://huggingface.co/susnato/ernie-m-large_pytorch/blob/main/config.json",
}
class ErnieMConfig(PretrainedConfig):
    model_type = "ernie_m"
    attribute_map: Dict[str, str] = {"dropout": "classifier_dropout", "num_classes": "num_labels"}

    def __init__(
        self,
        vocab_size: int = 250002,
        hidden_size: int = 768,
        num_hidden_layers: int = 12,
        num_attention_heads: int = 12,
        intermediate_size: int = 3072,
        hidden_act: str = "gelu",
        hidden_dropout_prob: float = 0.1,
        attention_probs_dropout_prob: float = 0.1,
        max_position_embeddings: int = 514,
        initializer_range: float = 0.02,
        pad_token_id: int = 1,
        layer_norm_eps: float = 1e-05,
        classifier_dropout=None,
        is_decoder=False,
        act_dropout=0.0,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.classifier_dropout = classifier_dropout
        self.is_decoder = is_decoder
        self.act_dropout = act_dropout
| 100
|
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
DDIMScheduler,
KandinskyVaaImgaImgPipeline,
KandinskyVaaPriorPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class KandinskyVaaImgaImgPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyVaaImgaImgPipeline
    params = ["image_embeds", "negative_image_embeds", "image"]
    batch_params = [
        "image_embeds",
        "negative_image_embeds",
        "image",
    ]
    required_optional_params = [
        "generator",
        "height",
        "width",
        "strength",
        "guidance_scale",
        "num_inference_steps",
        "return_dict",
        "guidance_scale",
        "num_images_per_prompt",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False

    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_a(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100
    @property
    def dummy_unet(self):
        torch.manual_seed(0)

        model_kwargs = {
"""in_channels""": 4,
# Out channels is double in channels because predicts mean and variance
"""out_channels""": 8,
"""addition_embed_type""": """image""",
"""down_block_types""": ("""ResnetDownsampleBlock2D""", """SimpleCrossAttnDownBlock2D"""),
"""up_block_types""": ("""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""),
"""mid_block_type""": """UNetMidBlock2DSimpleCrossAttn""",
"""block_out_channels""": (self.block_out_channels_a, self.block_out_channels_a * 2),
"""layers_per_block""": 1,
"""encoder_hid_dim""": self.text_embedder_hidden_size,
"""encoder_hid_dim_type""": """image_proj""",
"""cross_attention_dim""": self.cross_attention_dim,
"""attention_head_dim""": 4,
"""resnet_time_scale_shift""": """scale_shift""",
"""class_embed_type""": None,
}
        model = UNetaDConditionModel(**model_kwargs)
        return model

    @property
    def dummy_movq_kwargs(self):
        return {
"block_out_channels": [3_2, 6_4],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 1_2,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
    @property
    def dummy_movq(self):
        torch.manual_seed(0)
        model = VQModel(**self.dummy_movq_kwargs)
        return model
    def get_dummy_components(self):
        unet = self.dummy_unet
        movq = self.dummy_movq

        scheduler_kwargs = {
            "num_train_timesteps": 1000,
            "beta_schedule": "linear",
            "beta_start": 0.00085,
            "beta_end": 0.012,
            "clip_sample": False,
            "set_alpha_to_one": False,
            "steps_offset": 0,
            "prediction_type": "epsilon",
            "thresholding": False,
        }
        scheduler = DDIMScheduler(**scheduler_kwargs)

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "movq": movq,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed + 1)).to(
            device
        )
        # create init_image
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((256, 256))

        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "image": init_image,
            "image_embeds": image_embeds,
            "negative_image_embeds": negative_image_embeds,
            "generator": generator,
            "height": 64,
            "width": 64,
            "num_inference_steps": 10,
            "guidance_scale": 7.0,
            "strength": 0.2,
            "output_type": "np",
        }
        return inputs
    def test_kandinsky_img2img(self):
        device = "cpu"

        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.images
        image_from_tuple = pipe(
            **self.get_dummy_inputs(device),
            return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array(
            [0.6199778, 0.63984406, 0.46145785, 0.62944984, 0.5622215, 0.47306132, 0.47441456, 0.4607606, 0.48719263]
        )
        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"
@slow
@require_torch_gpu
class KandinskyVaaImgaImgPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_kandinsky_img2img(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinskyv22/kandinskyv22_img2img_frog.npy"
        )

        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png"
        )
        prompt = "A red cartoon frog, 4k"

        pipe_prior = KandinskyVaaPriorPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16
        )
        pipe_prior.to(torch_device)

        pipeline = KandinskyVaaImgaImgPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16
        )
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)

        generator = torch.Generator(device="cpu").manual_seed(0)

        image_emb, zero_image_emb = pipe_prior(
            prompt,
            generator=generator,
            num_inference_steps=5,
            negative_prompt="",
        ).to_tuple()

        output = pipeline(
            image=init_image,
            image_embeds=image_emb,
            negative_image_embeds=zero_image_emb,
            generator=generator,
            num_inference_steps=100,
            height=768,
            width=768,
            strength=0.2,
            output_type="np",
        )

        image = output.images[0]

        assert image.shape == (768, 768, 3)

        assert_mean_pixel_difference(image, expected_image)
| 100
| 1
|
import os
import pytest
import yaml
from datasets.features.features import Features, Value
from datasets.info import DatasetInfo, DatasetInfosDict
@pytest.mark.parametrize(
"""files""" , [
["""full:README.md""", """dataset_infos.json"""],
["""empty:README.md""", """dataset_infos.json"""],
["""dataset_infos.json"""],
["""full:README.md"""],
] , )
def test_from_dir(files, tmp_path_factory):
    dataset_infos_dir = tmp_path_factory.mktemp("dset_infos_dir")
if "full:README.md" in files:
with open(dataset_infos_dir / """README.md""" , """w""" ) as f:
f.write("""---\ndataset_info:\n dataset_size: 42\n---""" )
if "empty:README.md" in files:
with open(dataset_infos_dir / """README.md""" , """w""" ) as f:
f.write("""""" )
# we want to support dataset_infos.json for backward compatibility
if "dataset_infos.json" in files:
with open(dataset_infos_dir / """dataset_infos.json""" , """w""" ) as f:
f.write("""{\"default\": {\"dataset_size\": 42}}""" )
    dataset_infos = DatasetInfosDict.from_directory(dataset_infos_dir)
assert dataset_infos
assert dataset_infos["default"].dataset_size == 42
@pytest.mark.parametrize(
"""dataset_info""" , [
DatasetInfo(),
DatasetInfo(
description="""foo""" , features=Features({"""a""": Value("""int32""" )} ) , builder_name="""builder""" , config_name="""config""" , version="""1.0.0""" , splits=[{"""name""": """train"""}] , download_size=42 , ),
] , )
def test_dataset_info_dump_and_reload(tmp_path, dataset_info: DatasetInfo):
    tmp_path = str(tmp_path)
    dataset_info.write_to_directory(tmp_path)
    reloaded = DatasetInfo.from_directory(tmp_path)
    assert dataset_info == reloaded
    assert os.path.exists(os.path.join(tmp_path, "dataset_info.json"))
def test_dataset_info_to_yaml_dict():
    dataset_info = DatasetInfo(
        description="foo", citation="bar", homepage="https://foo.bar", license="CC0", features=Features({"a": Value("int32")}), post_processed={}, supervised_keys=(), task_templates=[], builder_name="builder", config_name="config", version="1.0.0", splits=[{"name": "train", "num_examples": 42}], download_checksums={}, download_size=1337, post_processing_size=442, dataset_size=1234, size_in_bytes=1337 + 442 + 1234,
    )
    dataset_info_yaml_dict = dataset_info._to_yaml_dict()
    assert sorted(dataset_info_yaml_dict) == sorted(DatasetInfo._INCLUDED_INFO_IN_YAML)
    for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
        assert key in dataset_info_yaml_dict
        assert isinstance(dataset_info_yaml_dict[key], (list, dict, int, str))
    dataset_info_yaml = yaml.safe_dump(dataset_info_yaml_dict)
    reloaded = yaml.safe_load(dataset_info_yaml)
    assert dataset_info_yaml_dict == reloaded
def test_dataset_info_to_yaml_dict_empty():
    dataset_info = DatasetInfo()
    dataset_info_yaml_dict = dataset_info._to_yaml_dict()
    assert dataset_info_yaml_dict == {}
@pytest.mark.parametrize(
"""dataset_infos_dict""" , [
DatasetInfosDict(),
DatasetInfosDict({"""default""": DatasetInfo()} ),
DatasetInfosDict({"""my_config_name""": DatasetInfo()} ),
DatasetInfosDict(
{
"""default""": DatasetInfo(
description="""foo""" , features=Features({"""a""": Value("""int32""" )} ) , builder_name="""builder""" , config_name="""config""" , version="""1.0.0""" , splits=[{"""name""": """train"""}] , download_size=42 , )
} ),
DatasetInfosDict(
{
"""v1""": DatasetInfo(dataset_size=42 ),
"""v2""": DatasetInfo(dataset_size=1337 ),
} ),
] , )
def test_dataset_infos_dict_dump_and_reload(tmp_path, dataset_infos_dict: DatasetInfosDict):
    tmp_path = str(tmp_path)
    dataset_infos_dict.write_to_directory(tmp_path)
    reloaded = DatasetInfosDict.from_directory(tmp_path)

    # the config_name of the dataset_infos_dict takes over the attribute
    for config_name, dataset_info in dataset_infos_dict.items():
        dataset_info.config_name = config_name
        # the yaml representation doesn't include fields like description or citation
        # so we just test that we can recover what we can from the yaml
        dataset_infos_dict[config_name] = DatasetInfo._from_yaml_dict(dataset_info._to_yaml_dict())
    assert dataset_infos_dict == reloaded

    if dataset_infos_dict:
        assert os.path.exists(os.path.join(tmp_path, "README.md"))
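

# Outside of pytest, the same dump/reload round trip can be exercised directly.
# A minimal sketch; the directory path below is illustrative, not from the tests.
if __name__ == "__main__":
    path = "/tmp/demo_info"
    os.makedirs(path, exist_ok=True)
    info = DatasetInfo(description="demo", dataset_size=42)
    info.write_to_directory(path)  # writes dataset_info.json into `path`
    reloaded = DatasetInfo.from_directory(path)
    assert reloaded.dataset_size == 42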
| 66
|
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import BertTokenizer, BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AlignProcessor, EfficientNetImageProcessor
@require_vision
class AlignProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "[PAD]",
            "[MASK]",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

        image_processor_map = {
            "do_resize": True,
            "size": 20,
            "do_center_crop": True,
            "crop_size": 18,
            "do_normalize": True,
            "image_mean": [0.48145466, 0.4578275, 0.40821073],
            "image_std": [0.26862954, 0.26130258, 0.27577711],
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)
    def get_tokenizer(self, **kwargs):
        return BertTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return BertTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return EfficientNetImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        # prepare a list of PIL images to feed the processor with
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs
    def test_save_load_pretrained_default(self):
        tokenizer_slow = self.get_tokenizer()
        tokenizer_fast = self.get_rust_tokenizer()
        image_processor = self.get_image_processor()

        processor_slow = AlignProcessor(tokenizer=tokenizer_slow, image_processor=image_processor)
        processor_slow.save_pretrained(self.tmpdirname)
        processor_slow = AlignProcessor.from_pretrained(self.tmpdirname, use_fast=False)

        processor_fast = AlignProcessor(tokenizer=tokenizer_fast, image_processor=image_processor)
        processor_fast.save_pretrained(self.tmpdirname)
        processor_fast = AlignProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab())
        self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab())
        self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab())
        self.assertIsInstance(processor_slow.tokenizer, BertTokenizer)
        self.assertIsInstance(processor_fast.tokenizer, BertTokenizerFast)

        self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor_slow.image_processor, EfficientNetImageProcessor)
        self.assertIsInstance(processor_fast.image_processor, EfficientNetImageProcessor)
    def test_save_load_pretrained_additional_features(self):
        processor = AlignProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = AlignProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, BertTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, EfficientNetImageProcessor)
    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = AlignProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_image_proc = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_image_proc.keys():
            self.assertAlmostEqual(input_image_proc[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = AlignProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"

        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str, padding="max_length", max_length=64)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = AlignProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["input_ids", "token_type_ids", "attention_mask", "pixel_values"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = AlignProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = AlignProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), processor.model_input_names)
| 66
| 1
|
"""simple docstring"""
def xnor_gate(input_1: int, input_2: int) -> int:
    """Return 1 if both inputs are equal, otherwise 0."""
    return 1 if input_1 == input_2 else 0


def test_xnor_gate() -> None:
    assert xnor_gate(0, 0) == 1
    assert xnor_gate(0, 1) == 0
    assert xnor_gate(1, 0) == 0
    assert xnor_gate(1, 1) == 1
if __name__ == "__main__":
print(xnor_gate(0, 0))
print(xnor_gate(0, 1))
print(xnor_gate(1, 0))
print(xnor_gate(1, 1))
| 25
|
'''simple docstring'''
import os
import posixpath
import shutil
import uuid
from dataclasses import dataclass
from typing import TYPE_CHECKING, Iterable, List, Optional, Tuple, Union
import numpy as np
import pyarrow as pa
import datasets
from datasets.arrow_writer import ArrowWriter, ParquetWriter
from datasets.config import MAX_SHARD_SIZE
from datasets.filesystems import (
is_remote_filesystem,
rename,
)
from datasets.iterable_dataset import _BaseExamplesIterable
from datasets.utils.py_utils import convert_file_size_to_int
logger = datasets.utils.logging.get_logger(__name__)
if TYPE_CHECKING:
import pyspark
@dataclass
class SparkConfig(datasets.BuilderConfig):
    features: Optional[datasets.Features] = None
def _generate_iterable_examples(df: "pyspark.sql.DataFrame", partition_order: List[int]):
    import pyspark

    def generate_fn():
        df_with_partition_id = df.select("*", pyspark.sql.functions.spark_partition_id().alias("part_id"))
        for partition_id in partition_order:
            partition_df = df_with_partition_id.select("*").where(f"part_id = {partition_id}").drop("part_id")
            rows = partition_df.collect()
            row_id = 0
            for row in rows:
                yield f"{partition_id}_{row_id}", row.asDict()
                row_id += 1

    return generate_fn
class SparkExamplesIterable(_BaseExamplesIterable):
    def __init__(self, df: "pyspark.sql.DataFrame", partition_order=None):
        self.df = df
        self.partition_order = partition_order or range(self.df.rdd.getNumPartitions())
        self.generate_examples_fn = _generate_iterable_examples(self.df, self.partition_order)

    def __iter__(self):
        yield from self.generate_examples_fn()

    def shuffle_data_sources(self, generator: np.random.Generator) -> "SparkExamplesIterable":
        partition_order = list(range(self.df.rdd.getNumPartitions()))
        generator.shuffle(partition_order)
        return SparkExamplesIterable(self.df, partition_order=partition_order)

    def shard_data_sources(self, worker_id: int, num_workers: int) -> "SparkExamplesIterable":
        partition_order = self.split_shard_indices_by_worker(worker_id, num_workers)
        return SparkExamplesIterable(self.df, partition_order=partition_order)

    @property
    def n_shards(self) -> int:
        return len(self.partition_order)
class Spark(datasets.DatasetBuilder):
    BUILDER_CONFIG_CLASS = SparkConfig

    def __init__(self, df: "pyspark.sql.DataFrame", cache_dir: str = None, working_dir: str = None, **config_kwargs):
        import pyspark

        self._spark = pyspark.sql.SparkSession.builder.getOrCreate()
        self.df = df
        self._working_dir = working_dir
        super().__init__(
            cache_dir=cache_dir,
            config_name=str(self.df.semanticHash()),
            **config_kwargs,
        )
    def _validate_cache_dir(self):
        # Capture the cache dir so that we don't reference self in create_cache_and_write_probe,
        # which would result in a pickling error due to pickling the SparkContext.
        cache_dir = self._cache_dir

        def create_cache_and_write_probe(context):
            # makedirs with exist_ok will recursively create the directory. It will not throw an error if directories
            # already exist.
            os.makedirs(cache_dir, exist_ok=True)
            probe_file = os.path.join(cache_dir, "fs_test" + uuid.uuid4().hex)
            # Opening the file in append mode will create a new file unless it already exists, in which case it will not
            # change the file contents.
            open(probe_file, "a")
            return [probe_file]

        if self._spark.conf.get("spark.master", "").startswith("local"):
            return

        # If the cluster is multi-node, make sure that the user provided a cache_dir and that it is on an NFS
        # accessible to the driver.
        # TODO: Stream batches to the driver using ArrowCollectSerializer instead of throwing an error.
        if self._cache_dir:
            probe = (
                self._spark.sparkContext.parallelize(range(1), 1).mapPartitions(create_cache_and_write_probe).collect()
            )
            if os.path.isfile(probe[0]):
                return

        raise ValueError(
            "When using Dataset.from_spark on a multi-node cluster, the driver and all workers should be able to access cache_dir"
        )
    def _info(self):
        return datasets.DatasetInfo(features=self.config.features)

    def _split_generators(self, dl_manager):
        return [datasets.SplitGenerator(name=datasets.Split.TRAIN)]
    def _repartition_df_if_needed(self, max_shard_size):
        import pyspark

        def get_arrow_batch_size(it):
            for batch in it:
                yield pa.RecordBatch.from_pydict({"batch_bytes": [batch.nbytes]})

        df_num_rows = self.df.count()
        sample_num_rows = df_num_rows if df_num_rows <= 100 else 100
        # Approximate the size of each row (in Arrow format) by averaging over a max-100-row sample.
        approx_bytes_per_row = (
            self.df.limit(sample_num_rows)
            .repartition(1)
            .mapInArrow(get_arrow_batch_size, "batch_bytes: long")
            .agg(pyspark.sql.functions.sum("batch_bytes").alias("sample_bytes"))
            .collect()[0]
            .sample_bytes
            / sample_num_rows
        )

        approx_total_size = approx_bytes_per_row * df_num_rows
        if approx_total_size > max_shard_size:
            # Make sure there is at least one row per partition.
            new_num_partitions = min(df_num_rows, int(approx_total_size / max_shard_size))
            self.df = self.df.repartition(new_num_partitions)
    def _prepare_split_single(
        self,
        fpath: str,
        file_format: str,
        max_shard_size: int,
    ) -> Iterable[Tuple[int, bool, Union[int, tuple]]]:
        import pyspark

        writer_class = ParquetWriter if file_format == "parquet" else ArrowWriter
        working_fpath = os.path.join(self._working_dir, os.path.basename(fpath)) if self._working_dir else fpath
        embed_local_files = file_format == "parquet"

        # Define these so that we don't reference self in write_arrow, which will result in a pickling error due to
        # pickling the SparkContext.
        features = self.config.features
        writer_batch_size = self._writer_batch_size
        storage_options = self._fs.storage_options

        def write_arrow(it):
            # Within the same SparkContext, no two task attempts will share the same attempt ID.
            task_id = pyspark.TaskContext().taskAttemptId()

            first_batch = next(it, None)
            if first_batch is None:
                # Some partitions might not receive any data.
                return pa.RecordBatch.from_arrays(
                    [[task_id], [0], [0]],
                    names=["task_id", "num_examples", "num_bytes"],
                )
            shard_id = 0
            writer = writer_class(
                features=features,
                path=working_fpath.replace("SSSSS", f"{shard_id:05d}").replace("TTTTT", f"{task_id:05d}"),
                writer_batch_size=writer_batch_size,
                storage_options=storage_options,
                embed_local_files=embed_local_files,
            )
            table = pa.Table.from_batches([first_batch])
            writer.write_table(table)
            for batch in it:
                if max_shard_size is not None and writer._num_bytes >= max_shard_size:
                    num_examples, num_bytes = writer.finalize()
                    writer.close()
                    yield pa.RecordBatch.from_arrays(
                        [[task_id], [num_examples], [num_bytes]],
                        names=["task_id", "num_examples", "num_bytes"],
                    )
                    shard_id += 1
                    writer = writer_class(
                        features=writer._features,
                        path=working_fpath.replace("SSSSS", f"{shard_id:05d}").replace("TTTTT", f"{task_id:05d}"),
                        writer_batch_size=writer_batch_size,
                        storage_options=storage_options,
                        embed_local_files=embed_local_files,
                    )
                table = pa.Table.from_batches([batch])
                writer.write_table(table)

            if writer._num_bytes > 0:
                num_examples, num_bytes = writer.finalize()
                writer.close()
                yield pa.RecordBatch.from_arrays(
                    [[task_id], [num_examples], [num_bytes]],
                    names=["task_id", "num_examples", "num_bytes"],
                )

            if working_fpath != fpath:
                for file in os.listdir(os.path.dirname(working_fpath)):
                    dest = os.path.join(os.path.dirname(fpath), os.path.basename(file))
                    shutil.move(file, dest)

        stats = (
            self.df.mapInArrow(write_arrow, "task_id: long, num_examples: long, num_bytes: long")
            .groupBy("task_id")
            .agg(
                pyspark.sql.functions.sum("num_examples").alias("total_num_examples"),
                pyspark.sql.functions.sum("num_bytes").alias("total_num_bytes"),
                pyspark.sql.functions.count("num_bytes").alias("num_shards"),
                pyspark.sql.functions.collect_list("num_examples").alias("shard_lengths"),
            )
            .collect()
        )
        for row in stats:
            yield row.task_id, (row.total_num_examples, row.total_num_bytes, row.num_shards, row.shard_lengths)
    def _prepare_split(
        self,
        split_generator: "datasets.SplitGenerator",
        file_format: str = "arrow",
        max_shard_size: Optional[Union[str, int]] = None,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        self._validate_cache_dir()

        max_shard_size = convert_file_size_to_int(max_shard_size or MAX_SHARD_SIZE)
        self._repartition_df_if_needed(max_shard_size)
        is_local = not is_remote_filesystem(self._fs)
        path_join = os.path.join if is_local else posixpath.join

        SUFFIX = "-TTTTT-SSSSS-of-NNNNN"
        fname = f"{self.name}-{split_generator.name}{SUFFIX}.{file_format}"
        fpath = path_join(self._output_dir, fname)

        total_num_examples = 0
        total_num_bytes = 0
        total_shards = 0
        task_id_and_num_shards = []
        all_shard_lengths = []

        for task_id, content in self._prepare_split_single(fpath, file_format, max_shard_size):
            (
                num_examples,
                num_bytes,
                num_shards,
                shard_lengths,
            ) = content
            if num_bytes > 0:
                total_num_examples += num_examples
                total_num_bytes += num_bytes
                total_shards += num_shards
                task_id_and_num_shards.append((task_id, num_shards))
                all_shard_lengths.extend(shard_lengths)

        split_generator.split_info.num_examples = total_num_examples
        split_generator.split_info.num_bytes = total_num_bytes
        # should rename everything at the end
        logger.debug(f"Renaming {total_shards} shards.")

        if total_shards > 1:
            split_generator.split_info.shard_lengths = all_shard_lengths

            # Define fs outside of _rename_shard so that we don't reference self in the function, which will result in a
            # pickling error due to pickling the SparkContext.
            fs = self._fs

            # use the -SSSSS-of-NNNNN pattern
            def _rename_shard(
                task_id: int,
                shard_id: int,
                global_shard_id: int,
            ):
                rename(
                    fs,
                    fpath.replace("SSSSS", f"{shard_id:05d}").replace("TTTTT", f"{task_id:05d}"),
                    fpath.replace("TTTTT-SSSSS", f"{global_shard_id:05d}").replace("NNNNN", f"{total_shards:05d}"),
                )

            args = []
            global_shard_id = 0
            for i in range(len(task_id_and_num_shards)):
                task_id, num_shards = task_id_and_num_shards[i]
                for shard_id in range(num_shards):
                    args.append([task_id, shard_id, global_shard_id])
                    global_shard_id += 1
            self._spark.sparkContext.parallelize(args, len(args)).map(lambda args: _rename_shard(*args)).collect()
        else:
            # don't use any pattern
            shard_id = 0
            task_id = task_id_and_num_shards[0][0]
            self._rename(
                fpath.replace("SSSSS", f"{shard_id:05d}").replace("TTTTT", f"{task_id:05d}"),
                fpath.replace(SUFFIX, ""),
            )
    def _get_examples_iterable_for_split(
        self,
        split_generator: "datasets.SplitGenerator",
    ) -> SparkExamplesIterable:
        return SparkExamplesIterable(self.df)
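

# A minimal usage sketch (assumptions: a local SparkSession is available and the
# public `Dataset.from_spark` entry point, which drives the builder above; the
# example DataFrame is illustrative, not part of the original module).
if __name__ == "__main__":
    from datasets import Dataset
    from pyspark.sql import SparkSession

    spark_session = SparkSession.builder.master("local[*]").getOrCreate()
    example_df = spark_session.createDataFrame([{"text": "hello"}, {"text": "world"}])
    ds = Dataset.from_spark(example_df)
    print(ds[0])  # {'text': 'hello'}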
| 85
| 0
|
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"sail/poolformer_s12": "https://huggingface.co/sail/poolformer_s12/resolve/main/config.json",
# See all PoolFormer models at https://huggingface.co/models?filter=poolformer
}
class PoolFormerConfig(PretrainedConfig):
    model_type = "poolformer"

    def __init__(
        self,
        num_channels=3,
        patch_size=16,
        stride=16,
        pool_size=3,
        mlp_ratio=4.0,
        depths=[2, 2, 6, 2],
        hidden_sizes=[64, 128, 320, 512],
        patch_sizes=[7, 3, 3, 3],
        strides=[4, 2, 2, 2],
        padding=[2, 1, 1, 1],
        num_encoder_blocks=4,
        drop_path_rate=0.0,
        hidden_act="gelu",
        use_layer_scale=True,
        layer_scale_init_value=1e-5,
        initializer_range=0.02,
        **kwargs,
    ):
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.stride = stride
        self.padding = padding
        self.pool_size = pool_size
        self.hidden_sizes = hidden_sizes
        self.mlp_ratio = mlp_ratio
        self.depths = depths
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.num_encoder_blocks = num_encoder_blocks
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_layer_scale = use_layer_scale
        self.layer_scale_init_value = layer_scale_init_value
        self.initializer_range = initializer_range
        super().__init__(**kwargs)


class PoolFormerOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 2e-3
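

# A brief usage sketch for the configuration above; the printed values are the
# defaults, and the snippet is illustrative rather than part of the original module.
if __name__ == "__main__":
    config = PoolFormerConfig()
    print(config.model_type)    # "poolformer"
    print(config.hidden_sizes)  # [64, 128, 320, 512]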
| 367
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

VISUAL_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"uclanlp/visualbert-vqa": "https://huggingface.co/uclanlp/visualbert-vqa/resolve/main/config.json",
"uclanlp/visualbert-vqa-pre": "https://huggingface.co/uclanlp/visualbert-vqa-pre/resolve/main/config.json",
"uclanlp/visualbert-vqa-coco-pre": (
"https://huggingface.co/uclanlp/visualbert-vqa-coco-pre/resolve/main/config.json"
),
"uclanlp/visualbert-vcr": "https://huggingface.co/uclanlp/visualbert-vcr/resolve/main/config.json",
"uclanlp/visualbert-vcr-pre": "https://huggingface.co/uclanlp/visualbert-vcr-pre/resolve/main/config.json",
"uclanlp/visualbert-vcr-coco-pre": (
"https://huggingface.co/uclanlp/visualbert-vcr-coco-pre/resolve/main/config.json"
),
"uclanlp/visualbert-nlvr2": "https://huggingface.co/uclanlp/visualbert-nlvr2/resolve/main/config.json",
"uclanlp/visualbert-nlvr2-pre": "https://huggingface.co/uclanlp/visualbert-nlvr2-pre/resolve/main/config.json",
"uclanlp/visualbert-nlvr2-coco-pre": (
"https://huggingface.co/uclanlp/visualbert-nlvr2-coco-pre/resolve/main/config.json"
)
# See all VisualBERT models at https://huggingface.co/models?filter=visual_bert
}
class VisualBertConfig(PretrainedConfig):
    model_type = "visual_bert"

    def __init__(
        self,
        vocab_size=30_522,
        hidden_size=768,
        visual_embedding_dim=512,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        bypass_transformer=False,
        special_visual_initialize=True,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.visual_embedding_dim = visual_embedding_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.bypass_transformer = bypass_transformer
        self.special_visual_initialize = special_visual_initialize
| 246
| 0
|
"""simple docstring"""
from collections import defaultdict
from math import ceil, sqrt
def solution(t_limit: int = 1000000, n_limit: int = 10) -> int:
    count = defaultdict(int)
    for outer_width in range(3, (t_limit // 4) + 2):
        if outer_width * outer_width > t_limit:
            hole_width_lower_bound = max(
                ceil(sqrt(outer_width * outer_width - t_limit)), 1
            )
        else:
            hole_width_lower_bound = 1
        hole_width_lower_bound += (outer_width - hole_width_lower_bound) % 2

        for hole_width in range(hole_width_lower_bound, outer_width - 1, 2):
            count[outer_width * outer_width - hole_width * hole_width] += 1

    return sum(1 for n in count.values() if 1 <= n <= n_limit)
if __name__ == "__main__":
print(F'''{solution() = }''')
| 183
|
"""simple docstring"""
from __future__ import annotations
def prime_sieve(limit: int) -> list[int]:
    is_prime = [True] * limit
    is_prime[0] = False
    is_prime[1] = False
    is_prime[2] = True

    for i in range(3, int(limit**0.5 + 1), 2):
        index = i * 2
        while index < limit:
            is_prime[index] = False
            index = index + i

    primes = [2]

    for i in range(3, limit, 2):
        if is_prime[i]:
            primes.append(i)

    return primes


def solution(ceiling: int = 1000000) -> int:
    primes = prime_sieve(ceiling)
    length = 0
    largest = 0

    for i in range(len(primes)):
        for j in range(i + length, len(primes)):
            sol = sum(primes[i:j])
            if sol >= ceiling:
                break

            if sol in primes:
                length = j - i
                largest = sol

    return largest
if __name__ == "__main__":
print(F'''{solution() = }''')
| 183
| 1
|
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple
from transformers import AddedToken, BatchEncoding, PerceiverTokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
if is_torch_available():
    FRAMEWORK = "pt"
elif is_tf_available():
    FRAMEWORK = "tf"
else:
    FRAMEWORK = "jax"
class PerceiverTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = PerceiverTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()
        tokenizer = PerceiverTokenizer()
        tokenizer.save_pretrained(self.tmpdirname)

    @cached_property
    def perceiver_tokenizer(self):
        return PerceiverTokenizer.from_pretrained("deepmind/language-perceiver")

    def get_tokenizer(self, **kwargs) -> PerceiverTokenizer:
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)
    def get_clean_sequence(self, tokenizer, with_prefix_space=False, max_length=20, min_length=5) -> Tuple[str, list]:
        # XXX The default common tokenizer tests assume that every ID is decodable on its own.
        # This assumption is invalid for Perceiver because single bytes might not be
        # valid utf-8 (byte 128 for instance).
        # Here we're overriding the smallest possible method to provide
        # a clean sequence without making the same assumption.
        toks = []
        for i in range(len(tokenizer)):
            try:
                tok = tokenizer.decode([i], clean_up_tokenization_spaces=False)
            except UnicodeDecodeError:
                pass
            toks.append((i, tok))

        toks = list(filter(lambda t: re.match(r"^[ a-zA-Z]+$", t[1]), toks))
        toks = list(filter(lambda t: [t[0]] == tokenizer.encode(t[1], add_special_tokens=False), toks))
        if max_length is not None and len(toks) > max_length:
            toks = toks[:max_length]
        if min_length is not None and len(toks) < min_length and len(toks) > 0:
            while len(toks) < min_length:
                toks = toks + toks
        # toks_str = [t[1] for t in toks]
        toks_ids = [t[0] for t in toks]

        # Ensure consistency
        output_txt = tokenizer.decode(toks_ids, clean_up_tokenization_spaces=False)
        if " " not in output_txt and len(toks_ids) > 1:
            output_txt = (
                tokenizer.decode([toks_ids[0]], clean_up_tokenization_spaces=False)
                + " "
                + tokenizer.decode(toks_ids[1:], clean_up_tokenization_spaces=False)
            )
        if with_prefix_space:
            output_txt = " " + output_txt
        output_ids = tokenizer.encode(output_txt, add_special_tokens=False)
        return output_txt, output_ids
    def test_multibytes_char(self):
        tokenizer = self.perceiver_tokenizer
        src_text = "Unicode €."
        encoded = tokenizer(src_text)
        encoded_ids = [4, 91, 116, 111, 105, 117, 106, 107, 38, 232, 136, 178, 52, 5]
        self.assertEqual(encoded["input_ids"], encoded_ids)

        # decoding
        decoded = tokenizer.decode(encoded_ids)
        self.assertEqual(decoded, "[CLS]Unicode €.[SEP]")

        encoded = tokenizer("e è é ê ë")
        encoded_ids = [4, 107, 38, 201, 174, 38, 201, 175, 38, 201, 176, 38, 201, 177, 5]
        self.assertEqual(encoded["input_ids"], encoded_ids)
        # decoding
        decoded = tokenizer.decode(encoded_ids)
        self.assertEqual(decoded, "[CLS]e è é ê ë[SEP]")

        # encode/decode, but with `encode` instead of `__call__`
        self.assertEqual(tokenizer.decode(tokenizer.encode("e è é ê ë")), "[CLS]e è é ê ë[SEP]")
    def test_prepare_batch_integration(self):
        tokenizer = self.perceiver_tokenizer
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        # fmt: off
        expected_src_tokens = [4, 71, 38, 114, 117, 116, 109, 38, 118, 103, 120, 103, 109, 120, 103, 118, 110, 38, 108, 117, 120, 38, 121, 123, 115, 115, 103, 120, 111, 128, 103, 122, 111, 117, 116, 52, 5, 0]
        # fmt: on
        batch = tokenizer(src_text, padding=True, return_tensors=FRAMEWORK)
        self.assertIsInstance(batch, BatchEncoding)

        if FRAMEWORK != "jax":
            result = list(batch.input_ids.numpy()[0])
        else:
            result = list(batch.input_ids.tolist()[0])

        self.assertListEqual(expected_src_tokens, result)

        self.assertEqual((2, 38), batch.input_ids.shape)
        self.assertEqual((2, 38), batch.attention_mask.shape)
    def test_empty_target_text(self):
        tokenizer = self.perceiver_tokenizer
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        batch = tokenizer(src_text, padding=True, return_tensors=FRAMEWORK)
        # check if input_ids are returned and no decoder_input_ids
        self.assertIn("input_ids", batch)
        self.assertIn("attention_mask", batch)
        self.assertNotIn("decoder_input_ids", batch)
        self.assertNotIn("decoder_attention_mask", batch)
    def test_max_length_integration(self):
        tokenizer = self.perceiver_tokenizer
        tgt_text = [
            "Summary of the text.",
            "Another summary.",
        ]
        targets = tokenizer(
            text_target=tgt_text, max_length=32, padding="max_length", truncation=True, return_tensors=FRAMEWORK
        )
        self.assertEqual(32, targets["input_ids"].shape[1])
    def test_save_and_load_tokenizer(self):
        # safety check on max_len default value so we are sure the test works
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"""{tokenizer.__class__.__name__}"""):
                self.assertNotEqual(tokenizer.model_max_length, 42)

        # Now let's start the test
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"""{tokenizer.__class__.__name__}"""):
                # Isolate this from the other tests because we save additional tokens/etc
                tmpdirname = tempfile.mkdtemp()

                sample_text = " He is very happy, UNwant\u00E9d,running"
                before_tokens = tokenizer.encode(sample_text, add_special_tokens=False)
                tokenizer.save_pretrained(tmpdirname)

                after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname)
                after_tokens = after_tokenizer.encode(sample_text, add_special_tokens=False)
                self.assertListEqual(before_tokens, after_tokens)

                shutil.rmtree(tmpdirname)

        tokenizers = self.get_tokenizers(model_max_length=42)
        for tokenizer in tokenizers:
            with self.subTest(f"""{tokenizer.__class__.__name__}"""):
                # Isolate this from the other tests because we save additional tokens/etc
                tmpdirname = tempfile.mkdtemp()

                sample_text = " He is very happy, UNwant\u00E9d,running"
                tokenizer.add_tokens(["bim", "bambam"])
                additional_special_tokens = tokenizer.additional_special_tokens
                additional_special_tokens.append("new_additional_special_token")
                tokenizer.add_special_tokens({"additional_special_tokens": additional_special_tokens})
                before_tokens = tokenizer.encode(sample_text, add_special_tokens=False)
                tokenizer.save_pretrained(tmpdirname)

                after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname)
                after_tokens = after_tokenizer.encode(sample_text, add_special_tokens=False)
                self.assertListEqual(before_tokens, after_tokens)
                self.assertIn("new_additional_special_token", after_tokenizer.additional_special_tokens)
                self.assertEqual(after_tokenizer.model_max_length, 42)

                tokenizer = tokenizer.__class__.from_pretrained(tmpdirname, model_max_length=43)
                self.assertEqual(tokenizer.model_max_length, 43)

                shutil.rmtree(tmpdirname)
    def test_special_tokens_initialization_with_non_empty_additional_special_tokens(self):
        tokenizer_list = []
        if self.test_slow_tokenizer:
            tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()))
        if self.test_rust_tokenizer:
            tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()))

        for tokenizer_class, tokenizer_utils in tokenizer_list:
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer_utils.save_pretrained(tmp_dir)

                with open(os.path.join(tmp_dir, "special_tokens_map.json"), encoding="utf-8") as json_file:
                    special_tokens_map = json.load(json_file)

                with open(os.path.join(tmp_dir, "tokenizer_config.json"), encoding="utf-8") as json_file:
                    tokenizer_config = json.load(json_file)

                added_tokens_extra_ids = [f"""<extra_id_{i}>""" for i in range(125)]

                special_tokens_map["additional_special_tokens"] = added_tokens_extra_ids + [
                    "an_additional_special_token"
                ]
                tokenizer_config["additional_special_tokens"] = added_tokens_extra_ids + [
                    "an_additional_special_token"
                ]

                with open(os.path.join(tmp_dir, "special_tokens_map.json"), "w", encoding="utf-8") as outfile:
                    json.dump(special_tokens_map, outfile)
                with open(os.path.join(tmp_dir, "tokenizer_config.json"), "w", encoding="utf-8") as outfile:
                    json.dump(tokenizer_config, outfile)

                # the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
                # into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
                # "special_tokens_map.json" files
                tokenizer_without_change_in_init = tokenizer_class.from_pretrained(
                    tmp_dir,
                )
                self.assertIn(
                    "an_additional_special_token", tokenizer_without_change_in_init.additional_special_tokens
                )
                self.assertEqual(
                    ["an_additional_special_token"],
                    tokenizer_without_change_in_init.convert_ids_to_tokens(
                        tokenizer_without_change_in_init.convert_tokens_to_ids(["an_additional_special_token"])
                    ),
                )

                # Now we test that we can change the value of additional_special_tokens in the from_pretrained
                new_added_tokens = added_tokens_extra_ids + [AddedToken("a_new_additional_special_token", lstrip=True)]
                tokenizer = tokenizer_class.from_pretrained(
                    tmp_dir,
                    additional_special_tokens=new_added_tokens,
                )

                self.assertIn("a_new_additional_special_token", tokenizer.additional_special_tokens)
                self.assertEqual(
                    ["a_new_additional_special_token"],
                    tokenizer.convert_ids_to_tokens(
                        tokenizer.convert_tokens_to_ids(["a_new_additional_special_token"])
                    ),
                )
    def test_decode_invalid_byte_id(self):
        tokenizer = self.perceiver_tokenizer
        self.assertEqual(tokenizer.decode([178]), "�")

    # tokenizer can be instantiated without any pretrained files, so no pretrained list to test
    def test_pretrained_model_lists(self):
        pass

    # tokenizer does not have vocabulary
    def test_get_vocab(self):
        pass

    # inputs cannot be pretokenized
    def test_pretokenized_inputs(self):
        pass

    # tests all ids in vocab => vocab doesn't exist so unnecessary to test
    def test_conversion_reversible(self):
        pass

    def test_convert_tokens_to_string_format(self):
        # The default common tokenizer tests uses invalid tokens for Perceiver that can only accept one-character
        # strings and special added tokens as tokens
        tokenizers = self.get_tokenizers(fast=True, do_lower_case=True)
        for tokenizer in tokenizers:
            with self.subTest(f"""{tokenizer.__class__.__name__}"""):
                tokens = ["[CLS]", "t", "h", "i", "s", " ", "i", "s", " ", "a", " ", "t", "e", "s", "t", "[SEP]"]
                string = tokenizer.convert_tokens_to_string(tokens)
                self.assertIsInstance(string, str)
| 360
|
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {"configuration_mra": ["MRA_PRETRAINED_CONFIG_ARCHIVE_MAP", "MraConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mra"] = [
'''MRA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MraForMaskedLM''',
'''MraForMultipleChoice''',
'''MraForQuestionAnswering''',
'''MraForSequenceClassification''',
'''MraForTokenClassification''',
'''MraLayer''',
'''MraModel''',
'''MraPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_mra import MRA_PRETRAINED_CONFIG_ARCHIVE_MAP, MraConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mra import (
MRA_PRETRAINED_MODEL_ARCHIVE_LIST,
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraLayer,
MraModel,
MraPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
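
# A brief sketch of how this lazy module is typically consumed (the module path
# is assumed from the transformers package layout; illustrative only):
#
#     from transformers.models.mra import MraConfig  # resolved via _LazyModule
#
#     config = MraConfig()
#
# Heavy torch-backed classes such as MraModel are only imported on first access,
# which keeps `import transformers` fast when torch extras are not needed.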
| 278
| 0