import os
import sys

SRC_DIR = os.path.join(os.path.dirname(__file__), "src")
sys.path.append(SRC_DIR)

from transformers import (
    AutoConfig,
    AutoModel,
    AutoModelForCausalLM,
    AutoModelForMaskedLM,
    AutoModelForQuestionAnswering,
    AutoModelForSequenceClassification,
    AutoTokenizer,
    add_start_docstrings,
)

# Packages required to load this repo's entry points (torch.hub convention).
dependencies = [
    "torch",
    "numpy",
    "tokenizers",
    "filelock",
    "requests",
    "tqdm",
    "regex",
    "sentencepiece",
    "sacremoses",
    "importlib_metadata",
    "huggingface_hub",
]


@add_start_docstrings(AutoConfig.__doc__)
def config(*args, **kwargs):
    """Instantiate a pretrained configuration."""
    return AutoConfig.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoTokenizer.__doc__)
def tokenizer(*args, **kwargs):
    """Instantiate a pretrained tokenizer."""
    return AutoTokenizer.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModel.__doc__)
def model(*args, **kwargs):
    """Instantiate a pretrained base model."""
    return AutoModel.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForCausalLM.__doc__)
def modelForCausalLM(*args, **kwargs):
    """Instantiate a pretrained model with a causal language modeling head."""
    return AutoModelForCausalLM.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForMaskedLM.__doc__)
def modelForMaskedLM(*args, **kwargs):
    """Instantiate a pretrained model with a masked language modeling head."""
    return AutoModelForMaskedLM.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForSequenceClassification.__doc__)
def modelForSequenceClassification(*args, **kwargs):
    """Instantiate a pretrained model with a sequence classification head."""
    return AutoModelForSequenceClassification.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForQuestionAnswering.__doc__)
def modelForQuestionAnswering(*args, **kwargs):
    """Instantiate a pretrained model with a question answering head."""
    return AutoModelForQuestionAnswering.from_pretrained(*args, **kwargs)
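

# Usage sketch (added; assumes this file serves as a torch.hub entry point, i.e.
# a hubconf.py - the `dependencies` list above is the torch.hub convention, and
# the repo path below is an illustrative assumption):
#
#   import torch
#
#   tok = torch.hub.load("huggingface/transformers", "tokenizer", "bert-base-uncased")
#   mdl = torch.hub.load("huggingface/transformers", "model", "bert-base-uncased")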
def actual_power(a: int, b: int) -> int:
    """Compute a**b for b >= 0 by exponentiation by squaring (divide and conquer)."""
    if b == 0:
        return 1
    half = actual_power(a, b // 2)  # reuse the half power instead of recomputing it
    if b % 2 == 0:
        return half * half
    return a * half * half


def power(a: int, b: int) -> float:
    """Compute a**b for any integer b, handling negative exponents via the reciprocal."""
    if b < 0:
        return 1 / actual_power(a, -b)
    return actual_power(a, b)


if __name__ == "__main__":
    print(power(-2, -3))  # -0.125
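    # Illustrative sanity checks (added; not in the original script). The squaring
    # recursion needs only O(log b) multiplications.
    assert power(2, 10) == 1024
    assert power(3, 0) == 1
    assert power(-2, -3) == -0.125  # equals 1 / (-2)**3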
import unittest

import numpy as np
import torch
from torch import nn
from transformers import (
    CLIPImageProcessor,
    CLIPTextConfig,
    CLIPTextModelWithProjection,
    CLIPTokenizer,
    CLIPVisionConfig,
    CLIPVisionModelWithProjection,
)

from diffusers import KandinskyV22PriorPipeline, PriorTransformer, UnCLIPScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import enable_full_determinism, skip_mps

from ..test_pipelines_common import PipelineTesterMixin

enable_full_determinism()


class KandinskyV22PriorPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyV22PriorPipeline
    params = ["prompt"]
    batch_params = ["prompt", "negative_prompt"]
    required_optional_params = [
        "num_images_per_prompt",
        "generator",
        "num_inference_steps",
        "latents",
        "negative_prompt",
        "guidance_scale",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False

    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100

    @property
    def dummy_tokenizer(self):
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        return tokenizer

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=self.text_embedder_hidden_size, projection_dim=self.text_embedder_hidden_size, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000,
        )
        return CLIPTextModelWithProjection(config)

    @property
    def dummy_prior(self):
        torch.manual_seed(0)
        model_kwargs = {
            "num_attention_heads": 2,
            "attention_head_dim": 12,
            "embedding_dim": self.text_embedder_hidden_size,
            "num_layers": 1,
        }
        model = PriorTransformer(**model_kwargs)
        # clip_std and clip_mean are initialized to 0, so PriorTransformer.post_process_latents
        # would always return 0 - set clip_std to 1 so it doesn't.
        model.clip_std = nn.Parameter(torch.ones(model.clip_std.shape))
        return model

    @property
    def dummy_image_encoder(self):
        torch.manual_seed(0)
        config = CLIPVisionConfig(
            hidden_size=self.text_embedder_hidden_size, image_size=224, projection_dim=self.text_embedder_hidden_size, intermediate_size=37, num_attention_heads=4, num_channels=3, num_hidden_layers=5, patch_size=14,
        )
        model = CLIPVisionModelWithProjection(config)
        return model

    @property
    def dummy_image_processor(self):
        image_processor = CLIPImageProcessor(
            crop_size=224, do_center_crop=True, do_normalize=True, do_resize=True, image_mean=[0.48145466, 0.4578275, 0.40821073], image_std=[0.26862954, 0.26130258, 0.27577711], resample=3, size=224,
        )
        return image_processor

    def get_dummy_components(self):
        prior = self.dummy_prior
        image_encoder = self.dummy_image_encoder
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        image_processor = self.dummy_image_processor
        scheduler = UnCLIPScheduler(
            variance_type="fixed_small_log", prediction_type="sample", num_train_timesteps=1000, clip_sample=True, clip_sample_range=10.0,
        )
        components = {
            "prior": prior,
            "image_encoder": image_encoder,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "scheduler": scheduler,
            "image_processor": image_processor,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "horse",
            "generator": generator,
            "guidance_scale": 4.0,
            "num_inference_steps": 2,
            "output_type": "np",
        }
        return inputs

    def test_kandinsky_prior(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.image_embeds
        image_from_tuple = pipe(**self.get_dummy_inputs(device), return_dict=False)[0]

        image_slice = image[0, -10:]
        image_from_tuple_slice = image_from_tuple[0, -10:]
        assert image.shape == (1, 32)
        expected_slice = np.array(
            [-0.0532, 1.7120, 0.3656, -1.0852, -0.8946, -1.1756, 0.4348, 0.2482, 0.5146, -0.1156]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2

    @skip_mps
    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device == "cpu"
        relax_max_difference = True
        test_mean_pixel_difference = False
        self._test_inference_batch_single_identical(
            test_max_difference=test_max_difference, relax_max_difference=relax_max_difference, test_mean_pixel_difference=test_mean_pixel_difference,
        )

    @skip_mps
    def test_attention_slicing_forward_pass(self):
        test_max_difference = torch_device == "cpu"
        test_mean_pixel_difference = False
        self._test_attention_slicing_forward_pass(
            test_max_difference=test_max_difference, test_mean_pixel_difference=test_mean_pixel_difference,
        )
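

# Usage sketch (added for illustration; the checkpoint name below is the public
# Kandinsky 2.2 prior checkpoint, an assumption not taken from this test file):
#
#   from diffusers import KandinskyV22PriorPipeline
#
#   pipe = KandinskyV22PriorPipeline.from_pretrained("kandinsky-community/kandinsky-2-2-prior")
#   out = pipe("horse", num_inference_steps=25, guidance_scale=4.0)
#   image_embeds, negative_image_embeds = out.image_embeds, out.negative_image_embeds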
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_tf_available,
    is_torch_available,
    is_vision_available,
)


_import_structure = {
    "configuration_convnext": ["CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP", "ConvNextConfig", "ConvNextOnnxConfig"]
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_convnext"] = ["ConvNextFeatureExtractor"]
    _import_structure["image_processing_convnext"] = ["ConvNextImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_convnext"] = [
        "CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ConvNextForImageClassification",
        "ConvNextModel",
        "ConvNextPreTrainedModel",
        "ConvNextBackbone",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_convnext"] = [
        "TFConvNextForImageClassification",
        "TFConvNextModel",
        "TFConvNextPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_convnext import CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvNextConfig, ConvNextOnnxConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_convnext import ConvNextFeatureExtractor
        from .image_processing_convnext import ConvNextImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_convnext import (
            CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            ConvNextBackbone,
            ConvNextForImageClassification,
            ConvNextModel,
            ConvNextPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_convnext import TFConvNextForImageClassification, TFConvNextModel, TFConvNextPreTrainedModel

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
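

# Note (added): at runtime this module object is replaced by a _LazyModule, so e.g.
#   from transformers.models.convnext import ConvNextConfig
# resolves ConvNextConfig from _import_structure only on first attribute access,
# keeping `import transformers` cheap when torch/TF/vision extras are not installed.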
import numpy as np
from PIL import Image


def maxpooling(arr: np.ndarray, size: int, stride: int) -> np.ndarray:
    """Apply 2D max pooling (window `size`, step `stride`) to a square matrix."""
    arr = np.array(arr)
    if arr.shape[0] != arr.shape[1]:
        raise ValueError("The input array is not a square matrix")
    i = 0
    j = 0
    mat_i = 0
    mat_j = 0

    # compute the shape of the output matrix
    maxpool_shape = (arr.shape[0] - size) // stride + 1
    # initialize the output matrix with zeros of shape maxpool_shape
    updated_arr = np.zeros((maxpool_shape, maxpool_shape))

    while i < arr.shape[0]:
        if i + size > arr.shape[0]:
            # if the end of the matrix is reached, break
            break
        while j < arr.shape[1]:
            # if the end of the matrix is reached, break
            if j + size > arr.shape[1]:
                break
            # compute the maximum of the pooling matrix
            updated_arr[mat_i][mat_j] = np.max(arr[i : i + size, j : j + size])
            # shift the pooling matrix by stride of column pixels
            j += stride
            mat_j += 1
        # shift the pooling matrix by stride of row pixels
        i += stride
        mat_i += 1
        # reset the column index to 0
        j = 0
        mat_j = 0

    return updated_arr


def avgpooling(arr: np.ndarray, size: int, stride: int) -> np.ndarray:
    """Apply 2D average pooling (window `size`, step `stride`) to a square matrix."""
    arr = np.array(arr)
    if arr.shape[0] != arr.shape[1]:
        raise ValueError("The input array is not a square matrix")
    i = 0
    j = 0
    mat_i = 0
    mat_j = 0

    # compute the shape of the output matrix
    avgpool_shape = (arr.shape[0] - size) // stride + 1
    # initialize the output matrix with zeros of shape avgpool_shape
    updated_arr = np.zeros((avgpool_shape, avgpool_shape))

    while i < arr.shape[0]:
        # if the end of the matrix is reached, break
        if i + size > arr.shape[0]:
            break
        while j < arr.shape[1]:
            # if the end of the matrix is reached, break
            if j + size > arr.shape[1]:
                break
            # compute the average of the pooling matrix
            updated_arr[mat_i][mat_j] = int(np.average(arr[i : i + size, j : j + size]))
            # shift the pooling matrix by stride of column pixels
            j += stride
            mat_j += 1
        # shift the pooling matrix by stride of row pixels
        i += stride
        mat_i += 1
        # reset the column index to 0
        j = 0
        mat_j = 0

    return updated_arr


# Main Function
if __name__ == "__main__":
    from doctest import testmod

    testmod(name="avgpooling", verbose=True)

    # Loading the image
    image = Image.open("path_to_image")

    # Converting the image to numpy array and maxpooling, displaying the result
    # Ensure that the image is a square matrix
    Image.fromarray(maxpooling(np.array(image), size=3, stride=2)).show()

    # Converting the image to numpy array and averagepooling, displaying the result
    # Ensure that the image is a square matrix
    Image.fromarray(avgpooling(np.array(image), size=3, stride=2)).show()
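    # Worked example (added for illustration): pooling a 4x4 matrix with size=2,
    # stride=2 gives the 2x2 matrix of block maxima:
    #   maxpooling([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12], [13, 14, 15, 16]], 2, 2)
    #   -> [[ 6.,  8.],
    #       [14., 16.]]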
from __future__ import annotations

import pandas as pd


def calculate_waitingtime(arrival_time: list[int], burst_time: list[int], no_of_processes: int) -> list[int]:
    """Calculate the waiting time of each process under preemptive shortest-job-first."""
    remaining_time = [0] * no_of_processes
    waiting_time = [0] * no_of_processes
    # Copy the burst time into remaining_time[]
    for i in range(no_of_processes):
        remaining_time[i] = burst_time[i]

    complete = 0
    increment_time = 0
    minm = 999999999
    short = 0
    check = False

    # Process until all processes are completed
    while complete != no_of_processes:
        for j in range(no_of_processes):
            if arrival_time[j] <= increment_time and remaining_time[j] > 0:
                if remaining_time[j] < minm:
                    minm = remaining_time[j]
                    short = j
                    check = True

        if not check:
            increment_time += 1
            continue
        remaining_time[short] -= 1

        minm = remaining_time[short]
        if minm == 0:
            minm = 999999999

        if remaining_time[short] == 0:
            complete += 1
            check = False

            # Find finish time of current process
            finish_time = increment_time + 1

            # Calculate waiting time
            finar = finish_time - arrival_time[short]
            waiting_time[short] = finar - burst_time[short]

            if waiting_time[short] < 0:
                waiting_time[short] = 0

        # Increment time
        increment_time += 1
    return waiting_time


def calculate_turnaroundtime(burst_time: list[int], no_of_processes: int, waiting_time: list[int]) -> list[int]:
    """Turnaround time is burst time plus waiting time."""
    turn_around_time = [0] * no_of_processes
    for i in range(no_of_processes):
        turn_around_time[i] = burst_time[i] + waiting_time[i]
    return turn_around_time


def calculate_average_times(waiting_time: list[int], turn_around_time: list[int], no_of_processes: int) -> None:
    """Print the average waiting and turnaround times."""
    total_waiting_time = 0
    total_turn_around_time = 0
    for i in range(no_of_processes):
        total_waiting_time = total_waiting_time + waiting_time[i]
        total_turn_around_time = total_turn_around_time + turn_around_time[i]
    print(f"Average waiting time = {total_waiting_time / no_of_processes:.5f}")
    print("Average turn around time =", total_turn_around_time / no_of_processes)


if __name__ == "__main__":
    print("Enter how many process you want to analyze")
    no_of_processes = int(input())
    burst_time = [0] * no_of_processes
    arrival_time = [0] * no_of_processes
    processes = list(range(1, no_of_processes + 1))

    for i in range(no_of_processes):
        print("Enter the arrival time and burst time for process:--" + str(i + 1))
        arrival_time[i], burst_time[i] = map(int, input().split())

    waiting_time = calculate_waitingtime(arrival_time, burst_time, no_of_processes)
    turn_around_time = calculate_turnaroundtime(burst_time, no_of_processes, waiting_time)

    calculate_average_times(waiting_time, turn_around_time, no_of_processes)

    fcfs = pd.DataFrame(
        list(zip(processes, burst_time, arrival_time, waiting_time, turn_around_time)),
        columns=[
            "Process",
            "BurstTime",
            "ArrivalTime",
            "WaitingTime",
            "TurnAroundTime",
        ],
    )

    # Printing the dataFrame
    pd.set_option("display.max_rows", fcfs.shape[0] + 1)
    print(fcfs)
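
    # Worked example (added for illustration): for arrival_time = [0, 1, 2] and
    # burst_time = [3, 1, 2], preemptive shortest-job-first gives
    #   calculate_waitingtime([0, 1, 2], [3, 1, 2], 3) == [1, 0, 2]
    # and turnaround times [4, 1, 4] (burst + waiting).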
import os
import posixpath
import shutil
import uuid
from dataclasses import dataclass
from typing import TYPE_CHECKING, Iterable, List, Optional, Tuple, Union

import numpy as np
import pyarrow as pa

import datasets
from datasets.arrow_writer import ArrowWriter, ParquetWriter
from datasets.config import MAX_SHARD_SIZE
from datasets.filesystems import (
    is_remote_filesystem,
    rename,
)
from datasets.iterable_dataset import _BaseExamplesIterable
from datasets.utils.py_utils import convert_file_size_to_int


logger = datasets.utils.logging.get_logger(__name__)

if TYPE_CHECKING:
    import pyspark


@dataclass
class SparkConfig(datasets.BuilderConfig):
    """BuilderConfig for Spark."""

    features: Optional[datasets.Features] = None


def _generate_iterable_examples(df: "pyspark.sql.DataFrame", partition_order: List[int]):
    import pyspark

    def generate_fn():
        df_with_partition_id = df.select("*", pyspark.sql.functions.spark_partition_id().alias("part_id"))
        for partition_id in partition_order:
            partition_df = df_with_partition_id.select("*").where(f"part_id = {partition_id}").drop("part_id")
            rows = partition_df.collect()
            row_id = 0
            for row in rows:
                yield f"{partition_id}_{row_id}", row.asDict()
                row_id += 1

    return generate_fn
class SparkExamplesIterable(_BaseExamplesIterable):
    def __init__(self, df: "pyspark.sql.DataFrame", partition_order=None):
        self.df = df
        self.partition_order = partition_order or range(self.df.rdd.getNumPartitions())
        self.generate_examples_fn = _generate_iterable_examples(self.df, self.partition_order)

    def __iter__(self):
        yield from self.generate_examples_fn()

    def shuffle_data_sources(self, generator: np.random.Generator) -> "SparkExamplesIterable":
        partition_order = list(range(self.df.rdd.getNumPartitions()))
        generator.shuffle(partition_order)
        return SparkExamplesIterable(self.df, partition_order=partition_order)

    def shard_data_sources(self, worker_id: int, num_workers: int) -> "SparkExamplesIterable":
        partition_order = self.split_shard_indices_by_worker(worker_id, num_workers)
        return SparkExamplesIterable(self.df, partition_order=partition_order)

    @property
    def n_shards(self) -> int:
        return len(self.partition_order)
class Spark(datasets.DatasetBuilder):
    BUILDER_CONFIG_CLASS = SparkConfig

    def __init__(self, df: "pyspark.sql.DataFrame", cache_dir: str = None, working_dir: str = None, **config_kwargs):
        import pyspark

        self._spark = pyspark.sql.SparkSession.builder.getOrCreate()
        self.df = df
        self._working_dir = working_dir
        super().__init__(cache_dir=cache_dir, config_name=str(self.df.semanticHash()), **config_kwargs)

    def _validate_cache_dir(self):
        def create_cache_and_write_probe(context):
            # makedirs with exist_ok will recursively create the directory. It will not throw an error if directories
            # already exist.
            os.makedirs(self._cache_dir, exist_ok=True)
            probe_file = os.path.join(self._cache_dir, "fs_test" + uuid.uuid4().hex)
            # Opening the file in append mode will create a new file unless it already exists, in which case it will not
            # change the file contents.
            open(probe_file, "a")
            return [probe_file]

        if self._spark.conf.get("spark.master", "").startswith("local"):
            return

        # If the cluster is multi-node, make sure that the user provided a cache_dir and that it is on an NFS
        # accessible to the driver.
        # TODO: Stream batches to the driver using ArrowCollectSerializer instead of throwing an error.
        if self._cache_dir:
            probe = (
                self._spark.sparkContext.parallelize(range(1), 1).mapPartitions(create_cache_and_write_probe).collect()
            )
            if os.path.isfile(probe[0]):
                return
        raise ValueError(
            "When using Dataset.from_spark on a multi-node cluster, the driver and all workers should be able to access cache_dir"
        )

    def _info(self):
        return datasets.DatasetInfo(features=self.config.features)

    def _split_generators(self, dl_manager: datasets.download.download_manager.DownloadManager):
        return [datasets.SplitGenerator(name=datasets.Split.TRAIN)]

    def _repartition_df_if_needed(self, max_shard_size: int):
        import pyspark

        def get_arrow_batch_size(it):
            for batch in it:
                yield pa.RecordBatch.from_pydict({"batch_bytes": [batch.nbytes]})

        df_num_rows = self.df.count()
        sample_num_rows = df_num_rows if df_num_rows <= 100 else 100
        # Approximate the size of each row (in Arrow format) by averaging over a max-100-row sample.
        approx_bytes_per_row = (
            self.df.limit(sample_num_rows)
            .repartition(1)
            .mapInArrow(get_arrow_batch_size, "batch_bytes: long")
            .agg(pyspark.sql.functions.sum("batch_bytes").alias("sample_bytes"))
            .collect()[0]
            .sample_bytes
            / sample_num_rows
        )
        approx_total_size = approx_bytes_per_row * df_num_rows
        if approx_total_size > max_shard_size:
            # Make sure there is at least one row per partition.
            new_num_partitions = min(df_num_rows, int(approx_total_size / max_shard_size))
            self.df = self.df.repartition(new_num_partitions)
    def _prepare_split_single(
        self,
        fpath: str,
        file_format: str,
        max_shard_size: int,
    ) -> Iterable[Tuple[int, bool, Union[int, tuple]]]:
        import pyspark

        writer_class = ParquetWriter if file_format == "parquet" else ArrowWriter
        working_fpath = os.path.join(self._working_dir, os.path.basename(fpath)) if self._working_dir else fpath
        embed_local_files = file_format == "parquet"

        # Define these so that we don't reference self in write_arrow, which will result in a pickling error due to
        # pickling the SparkContext.
        features = self.config.features
        writer_batch_size = self._writer_batch_size
        storage_options = self._fs.storage_options

        def write_arrow(it):
            # Within the same SparkContext, no two task attempts will share the same attempt ID.
            task_id = pyspark.TaskContext().taskAttemptId()

            first_batch = next(it, None)
            if first_batch is None:
                # Some partitions might not receive any data.
                return pa.RecordBatch.from_arrays(
                    [[task_id], [0], [0]],
                    names=["task_id", "num_examples", "num_bytes"],
                )
            shard_id = 0
            writer = writer_class(
                features=features, path=working_fpath.replace("SSSSS", f"{shard_id:05d}").replace("TTTTT", f"{task_id:05d}"), writer_batch_size=writer_batch_size, storage_options=storage_options, embed_local_files=embed_local_files,
            )
            table = pa.Table.from_batches([first_batch])
            writer.write_table(table)
            for batch in it:
                if max_shard_size is not None and writer._num_bytes >= max_shard_size:
                    num_examples, num_bytes = writer.finalize()
                    writer.close()
                    yield pa.RecordBatch.from_arrays(
                        [[task_id], [num_examples], [num_bytes]],
                        names=["task_id", "num_examples", "num_bytes"],
                    )
                    shard_id += 1
                    writer = writer_class(
                        features=writer._features, path=working_fpath.replace("SSSSS", f"{shard_id:05d}").replace("TTTTT", f"{task_id:05d}"), writer_batch_size=writer_batch_size, storage_options=storage_options, embed_local_files=embed_local_files,
                    )
                table = pa.Table.from_batches([batch])
                writer.write_table(table)
            if writer._num_bytes > 0:
                num_examples, num_bytes = writer.finalize()
                writer.close()
                yield pa.RecordBatch.from_arrays(
                    [[task_id], [num_examples], [num_bytes]],
                    names=["task_id", "num_examples", "num_bytes"],
                )

            if working_fpath != fpath:
                for file in os.listdir(os.path.dirname(working_fpath)):
                    dest = os.path.join(os.path.dirname(fpath), os.path.basename(file))
                    shutil.move(file, dest)

        stats = (
            self.df.mapInArrow(write_arrow, "task_id: long, num_examples: long, num_bytes: long")
            .groupBy("task_id")
            .agg(
                pyspark.sql.functions.sum("num_examples").alias("total_num_examples"),
                pyspark.sql.functions.sum("num_bytes").alias("total_num_bytes"),
                pyspark.sql.functions.count("num_bytes").alias("num_shards"),
                pyspark.sql.functions.collect_list("num_examples").alias("shard_lengths"),
            )
            .collect()
        )
        for row in stats:
            yield row.task_id, (row.total_num_examples, row.total_num_bytes, row.num_shards, row.shard_lengths)
    def _prepare_split(
        self,
        split_generator: "datasets.SplitGenerator",
        file_format: str = "arrow",
        max_shard_size: Optional[Union[str, int]] = None,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        self._validate_cache_dir()

        max_shard_size = convert_file_size_to_int(max_shard_size or MAX_SHARD_SIZE)
        self._repartition_df_if_needed(max_shard_size)

        is_local = not is_remote_filesystem(self._fs)
        path_join = os.path.join if is_local else posixpath.join

        SUFFIX = "-TTTTT-SSSSS-of-NNNNN"
        fname = f"{self.name}-{split_generator.name}{SUFFIX}.{file_format}"
        fpath = path_join(self._output_dir, fname)

        total_num_examples = 0
        total_num_bytes = 0
        total_shards = 0
        task_id_and_num_shards = []
        all_shard_lengths = []

        for task_id, content in self._prepare_split_single(fpath, file_format, max_shard_size):
            num_examples, num_bytes, num_shards, shard_lengths = content
            if num_bytes > 0:
                total_num_examples += num_examples
                total_num_bytes += num_bytes
                total_shards += num_shards
                task_id_and_num_shards.append((task_id, num_shards))
                all_shard_lengths.extend(shard_lengths)

        split_generator.split_info.num_examples = total_num_examples
        split_generator.split_info.num_bytes = total_num_bytes

        # should rename everything at the end
        logger.debug(f"Renaming {total_shards} shards.")
        if total_shards > 1:
            split_generator.split_info.shard_lengths = all_shard_lengths

            # Define fs outside of _rename_shard so that we don't reference self in the function, which will result in a
            # pickling error due to pickling the SparkContext.
            fs = self._fs

            # use the -SSSSS-of-NNNNN pattern
            def _rename_shard(task_id: int, shard_id: int, global_shard_id: int):
                rename(
                    fs,
                    fpath.replace("SSSSS", f"{shard_id:05d}").replace("TTTTT", f"{task_id:05d}"),
                    fpath.replace("TTTTT-SSSSS", f"{global_shard_id:05d}").replace("NNNNN", f"{total_shards:05d}"),
                )

            args = []
            global_shard_id = 0
            for i in range(len(task_id_and_num_shards)):
                task_id, num_shards = task_id_and_num_shards[i]
                for shard_id in range(num_shards):
                    args.append([task_id, shard_id, global_shard_id])
                    global_shard_id += 1
            self._spark.sparkContext.parallelize(args, len(args)).map(lambda args: _rename_shard(*args)).collect()
        else:
            # don't use any pattern
            shard_id = 0
            task_id = task_id_and_num_shards[0][0]
            self._rename(
                fpath.replace("SSSSS", f"{shard_id:05d}").replace("TTTTT", f"{task_id:05d}"),
                fpath.replace(SUFFIX, ""),
            )

    def _get_examples_iterable_for_split(
        self,
        split_generator: "datasets.SplitGenerator",
    ) -> SparkExamplesIterable:
        return SparkExamplesIterable(self.df)
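

# Usage sketch (added for illustration): this builder backs `datasets.Dataset.from_spark`.
# A typical call, assuming an active Spark session, looks like:
#
#   import datasets
#   from pyspark.sql import SparkSession
#
#   spark = SparkSession.builder.getOrCreate()
#   df = spark.createDataFrame([("hello",), ("world",)], ["text"])
#   ds = datasets.Dataset.from_spark(df)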
import argparse
import logging
import pickle
from collections import Counter


logging.basicConfig(
    format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO
)
logger = logging.getLogger(__name__)

if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description="Token Counts for smoothing the masking probabilities in MLM (cf XLM/word2vec)"
    )
    parser.add_argument(
        "--data_file", type=str, default="data/dump.bert-base-uncased.pickle", help="The binarized dataset."
    )
    parser.add_argument(
        "--token_counts_dump", type=str, default="data/token_counts.bert-base-uncased.pickle", help="The dump file."
    )
    parser.add_argument("--vocab_size", default=30522, type=int)
    args = parser.parse_args()

    logger.info(f"Loading data from {args.data_file}")
    with open(args.data_file, "rb") as fp:
        data = pickle.load(fp)

    logger.info("Counting occurrences for MLM.")
    counter = Counter()
    for tk_ids in data:
        counter.update(tk_ids)
    counts = [0] * args.vocab_size
    for k, v in counter.items():
        counts[k] = v

    logger.info(f"Dump to {args.token_counts_dump}")
    with open(args.token_counts_dump, "wb") as handle:
        pickle.dump(counts, handle, protocol=pickle.HIGHEST_PROTOCOL)
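
# Example invocation (added; flags as defined above, script name assumed):
#   python token_counts.py --data_file data/dump.bert-base-uncased.pickle \
#       --token_counts_dump data/token_counts.bert-base-uncased.pickle --vocab_size 30522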
"""simple docstring"""
import unittest
import numpy as np
import timeout_decorator # noqa
from transformers import BlenderbotSmallConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
A = '''platform'''
import jax
import jax.numpy as jnp
from transformers.models.blenderbot_small.modeling_flax_blenderbot_small import (
FlaxBlenderbotSmallForConditionalGeneration,
FlaxBlenderbotSmallModel,
shift_tokens_right,
)
def __A ( a_ :Optional[int] , a_ :List[str] , a_ :Any=None , a_ :Tuple=None , a_ :int=None , a_ :Optional[Any]=None , a_ :int=None , a_ :str=None , ) -> Tuple:
if attention_mask is None:
__a : Dict = np.where(input_ids != config.pad_token_id , 1 , 0)
if decoder_attention_mask is None:
__a : Any = np.where(decoder_input_ids != config.pad_token_id , 1 , 0)
if head_mask is None:
__a : str = np.ones((config.encoder_layers, config.encoder_attention_heads))
if decoder_head_mask is None:
__a : str = np.ones((config.decoder_layers, config.decoder_attention_heads))
if cross_attn_head_mask is None:
__a : str = np.ones((config.decoder_layers, config.decoder_attention_heads))
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": attention_mask,
}
class __lowercase :
'''simple docstring'''
def __init__( self , _UpperCAmelCase , _UpperCAmelCase=13 , _UpperCAmelCase=7 , _UpperCAmelCase=True , _UpperCAmelCase=False , _UpperCAmelCase=99 , _UpperCAmelCase=16 , _UpperCAmelCase=2 , _UpperCAmelCase=4 , _UpperCAmelCase=4 , _UpperCAmelCase="gelu" , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.1 , _UpperCAmelCase=32 , _UpperCAmelCase=2 , _UpperCAmelCase=1 , _UpperCAmelCase=0 , _UpperCAmelCase=0.0_2 , ):
__a : Union[str, Any] = parent
__a : Optional[int] = batch_size
__a : Union[str, Any] = seq_length
__a : Tuple = is_training
__a : Union[str, Any] = use_labels
__a : Optional[int] = vocab_size
__a : Tuple = hidden_size
__a : Optional[int] = num_hidden_layers
__a : Tuple = num_attention_heads
__a : Any = intermediate_size
__a : str = hidden_act
__a : List[Any] = hidden_dropout_prob
__a : str = attention_probs_dropout_prob
__a : Tuple = max_position_embeddings
__a : str = eos_token_id
__a : Tuple = pad_token_id
__a : List[str] = bos_token_id
__a : Optional[int] = initializer_range
def _lowerCamelCase ( self ):
__a : Any = np.clip(ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ) , 3 , self.vocab_size )
__a : int = np.concatenate((input_ids, 2 * np.ones((self.batch_size, 1) , dtype=np.intaa )) , -1 )
__a : int = shift_tokens_right(_UpperCAmelCase , 1 , 2 )
__a : Union[str, Any] = BlenderbotSmallConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , initializer_range=self.initializer_range , use_cache=_UpperCAmelCase , )
__a : Tuple = prepare_blenderbot_inputs_dict(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
return config, inputs_dict
def _lowerCamelCase ( self ):
__a , __a : List[Any] = self.prepare_config_and_inputs()
return config, inputs_dict
def _lowerCamelCase ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
__a : Optional[int] = 20
__a : List[Any] = model_class_name(_UpperCAmelCase )
__a : int = model.encode(inputs_dict['''input_ids'''] )
__a , __a : List[str] = (
inputs_dict['''decoder_input_ids'''],
inputs_dict['''decoder_attention_mask'''],
)
__a : str = model.init_cache(decoder_input_ids.shape[0] , _UpperCAmelCase , _UpperCAmelCase )
__a : Union[str, Any] = jnp.ones((decoder_input_ids.shape[0], max_decoder_length) , dtype='''i4''' )
__a : Any = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
__a : int = model.decode(
decoder_input_ids[:, :-1] , _UpperCAmelCase , decoder_attention_mask=_UpperCAmelCase , past_key_values=_UpperCAmelCase , decoder_position_ids=_UpperCAmelCase , )
__a : Dict = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype='''i4''' )
__a : Union[str, Any] = model.decode(
decoder_input_ids[:, -1:] , _UpperCAmelCase , decoder_attention_mask=_UpperCAmelCase , past_key_values=outputs_cache.past_key_values , decoder_position_ids=_UpperCAmelCase , )
__a : Optional[int] = model.decode(_UpperCAmelCase , _UpperCAmelCase )
__a : int = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1e-3 , msg=f"""Max diff is {diff}""" )
def _lowerCamelCase ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
__a : Union[str, Any] = 20
__a : str = model_class_name(_UpperCAmelCase )
__a : Dict = model.encode(inputs_dict['''input_ids'''] )
__a , __a : Optional[int] = (
inputs_dict['''decoder_input_ids'''],
inputs_dict['''decoder_attention_mask'''],
)
__a : Any = jnp.concatenate(
[
decoder_attention_mask,
jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1]) ),
] , axis=-1 , )
__a : Optional[Any] = model.init_cache(decoder_input_ids.shape[0] , _UpperCAmelCase , _UpperCAmelCase )
__a : Tuple = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
__a : Tuple = model.decode(
decoder_input_ids[:, :-1] , _UpperCAmelCase , decoder_attention_mask=_UpperCAmelCase , past_key_values=_UpperCAmelCase , decoder_position_ids=_UpperCAmelCase , )
__a : Union[str, Any] = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype='''i4''' )
__a : Tuple = model.decode(
decoder_input_ids[:, -1:] , _UpperCAmelCase , past_key_values=outputs_cache.past_key_values , decoder_attention_mask=_UpperCAmelCase , decoder_position_ids=_UpperCAmelCase , )
__a : Optional[Any] = model.decode(_UpperCAmelCase , _UpperCAmelCase , decoder_attention_mask=_UpperCAmelCase )
__a : int = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1e-3 , msg=f"""Max diff is {diff}""" )
@require_flax
class __lowercase ( unittest.TestCase ):
'''simple docstring'''
__lowerCAmelCase = 99
def _lowerCamelCase ( self ):
__a : Optional[int] = np.array(
[
[71, 82, 18, 33, 46, 91, 2],
[68, 34, 26, 58, 30, 82, 2],
[5, 97, 17, 39, 94, 40, 2],
[76, 83, 94, 25, 70, 78, 2],
[87, 59, 41, 35, 48, 66, 2],
[55, 13, 16, 58, 5, 2, 1], # note padding
[64, 27, 31, 51, 12, 75, 2],
[52, 64, 86, 17, 83, 39, 2],
[48, 61, 9, 24, 71, 82, 2],
[26, 1, 60, 48, 22, 13, 2],
[21, 5, 62, 28, 14, 76, 2],
[45, 98, 37, 86, 59, 48, 2],
[70, 70, 50, 9, 28, 0, 2],
] , dtype=np.intaa , )
__a : List[Any] = input_ids.shape[0]
__a : int = BlenderbotSmallConfig(
vocab_size=self.vocab_size , d_model=24 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=32 , decoder_ffn_dim=32 , max_position_embeddings=48 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , )
return config, input_ids, batch_size
def _lowerCamelCase ( self ):
__a , __a , __a : List[str] = self._get_config_and_data()
__a : List[str] = FlaxBlenderbotSmallForConditionalGeneration(_UpperCAmelCase )
__a : Tuple = lm_model(input_ids=_UpperCAmelCase )
__a : Any = (batch_size, input_ids.shape[1], config.vocab_size)
self.assertEqual(outputs['''logits'''].shape , _UpperCAmelCase )
def _lowerCamelCase ( self ):
__a : Dict = BlenderbotSmallConfig(
vocab_size=self.vocab_size , d_model=14 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=8 , decoder_ffn_dim=8 , max_position_embeddings=48 , )
__a : List[Any] = FlaxBlenderbotSmallForConditionalGeneration(_UpperCAmelCase )
__a : Optional[int] = np.array([[71, 82, 18, 33, 46, 91, 2], [68, 34, 26, 58, 30, 2, 1]] , dtype=np.intaa )
__a : Dict = np.array([[82, 71, 82, 18, 2], [58, 68, 2, 1, 1]] , dtype=np.intaa )
__a : Dict = lm_model(input_ids=_UpperCAmelCase , decoder_input_ids=_UpperCAmelCase )
__a : Dict = (*summary.shape, config.vocab_size)
self.assertEqual(outputs['''logits'''].shape , _UpperCAmelCase )
def _lowerCamelCase ( self ):
__a : Tuple = np.array([[71, 82, 18, 33, 2, 1, 1], [68, 34, 26, 58, 30, 82, 2]] , dtype=np.intaa )
__a : Union[str, Any] = shift_tokens_right(_UpperCAmelCase , 1 , 2 )
__a : Tuple = np.equal(_UpperCAmelCase , 1 ).astype(np.floataa ).sum()
__a : Union[str, Any] = np.equal(_UpperCAmelCase , 1 ).astype(np.floataa ).sum()
self.assertEqual(shifted.shape , input_ids.shape )
self.assertEqual(_UpperCAmelCase , n_pad_before - 1 )
self.assertTrue(np.equal(shifted[:, 0] , 2 ).all() )
@require_flax
class __lowercase ( _UpperCamelCase , unittest.TestCase , _UpperCamelCase ):
'''simple docstring'''
__lowerCAmelCase = True
__lowerCAmelCase = (
(
FlaxBlenderbotSmallModel,
FlaxBlenderbotSmallForConditionalGeneration,
)
if is_flax_available()
else ()
)
__lowerCAmelCase = (FlaxBlenderbotSmallForConditionalGeneration,) if is_flax_available() else ()
def _lowerCamelCase ( self ):
__a : List[Any] = FlaxBlenderbotSmallModelTester(self )
def _lowerCamelCase ( self ):
__a , __a : Tuple = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
def _lowerCamelCase ( self ):
__a , __a : Optional[Any] = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward_with_attn_mask(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
def _lowerCamelCase ( self ):
__a , __a : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
__a : Optional[Any] = self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase )
__a : int = model_class(_UpperCAmelCase )
@jax.jit
def encode_jitted(_UpperCAmelCase , _UpperCAmelCase=None , **_UpperCAmelCase ):
return model.encode(input_ids=_UpperCAmelCase , attention_mask=_UpperCAmelCase )
with self.subTest('''JIT Enabled''' ):
__a : List[Any] = encode_jitted(**_UpperCAmelCase ).to_tuple()
with self.subTest('''JIT Disabled''' ):
with jax.disable_jit():
__a : int = encode_jitted(**_UpperCAmelCase ).to_tuple()
self.assertEqual(len(_UpperCAmelCase ) , len(_UpperCAmelCase ) )
for jitted_output, output in zip(_UpperCAmelCase , _UpperCAmelCase ):
self.assertEqual(jitted_output.shape , output.shape )
def _lowerCamelCase ( self ):
__a , __a : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
__a : Tuple = model_class(_UpperCAmelCase )
__a : Tuple = model.encode(inputs_dict['''input_ids'''] , inputs_dict['''attention_mask'''] )
__a : Dict = {
'''decoder_input_ids''': inputs_dict['''decoder_input_ids'''],
'''decoder_attention_mask''': inputs_dict['''decoder_attention_mask'''],
'''encoder_outputs''': encoder_outputs,
}
@jax.jit
def decode_jitted(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
return model.decode(
decoder_input_ids=_UpperCAmelCase , decoder_attention_mask=_UpperCAmelCase , encoder_outputs=_UpperCAmelCase , )
with self.subTest('''JIT Enabled''' ):
__a : Any = decode_jitted(**_UpperCAmelCase ).to_tuple()
with self.subTest('''JIT Disabled''' ):
with jax.disable_jit():
__a : Optional[int] = decode_jitted(**_UpperCAmelCase ).to_tuple()
self.assertEqual(len(_UpperCAmelCase ) , len(_UpperCAmelCase ) )
for jitted_output, output in zip(_UpperCAmelCase , _UpperCAmelCase ):
self.assertEqual(jitted_output.shape , output.shape )
@slow
def _lowerCamelCase ( self ):
for model_class_name in self.all_model_classes:
__a : Tuple = model_class_name.from_pretrained('''facebook/blenderbot_small-90M''' )
# FlaxBlenderbotForSequenceClassification expects eos token in input_ids
__a : Optional[Any] = np.ones((1, 1) ) * model.config.eos_token_id
__a : Tuple = model(_UpperCAmelCase )
self.assertIsNotNone(_UpperCAmelCase )
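

# Worked example (added for illustration, consistent with test_shift_tokens_right above):
# shift_tokens_right(np.array([[71, 82, 18, 33, 2, 1, 1]]), pad_token_id=1,
# decoder_start_token_id=2) prepends 2 and drops the last column, giving
# [[2, 71, 82, 18, 33, 2, 1]] - one fewer pad token, as the test asserts.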
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple

import sentencepiece as spm

from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer
from ...utils import logging


logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/nllb-200-distilled-600M": (
            "https://huggingface.co/facebook/nllb-200-distilled-600M/blob/main/sentencepiece.bpe.model"
        ),
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/nllb-200-distilled-600M": 1024,
}

# fmt: off
FAIRSEQ_LANGUAGE_CODES = ["ace_Arab", "ace_Latn", "acm_Arab", "acq_Arab", "aeb_Arab", "afr_Latn", "ajp_Arab", "aka_Latn", "amh_Ethi", "apc_Arab", "arb_Arab", "ars_Arab", "ary_Arab", "arz_Arab", "asm_Beng", "ast_Latn", "awa_Deva", "ayr_Latn", "azb_Arab", "azj_Latn", "bak_Cyrl", "bam_Latn", "ban_Latn", "bel_Cyrl", "bem_Latn", "ben_Beng", "bho_Deva", "bjn_Arab", "bjn_Latn", "bod_Tibt", "bos_Latn", "bug_Latn", "bul_Cyrl", "cat_Latn", "ceb_Latn", "ces_Latn", "cjk_Latn", "ckb_Arab", "crh_Latn", "cym_Latn", "dan_Latn", "deu_Latn", "dik_Latn", "dyu_Latn", "dzo_Tibt", "ell_Grek", "eng_Latn", "epo_Latn", "est_Latn", "eus_Latn", "ewe_Latn", "fao_Latn", "pes_Arab", "fij_Latn", "fin_Latn", "fon_Latn", "fra_Latn", "fur_Latn", "fuv_Latn", "gla_Latn", "gle_Latn", "glg_Latn", "grn_Latn", "guj_Gujr", "hat_Latn", "hau_Latn", "heb_Hebr", "hin_Deva", "hne_Deva", "hrv_Latn", "hun_Latn", "hye_Armn", "ibo_Latn", "ilo_Latn", "ind_Latn", "isl_Latn", "ita_Latn", "jav_Latn", "jpn_Jpan", "kab_Latn", "kac_Latn", "kam_Latn", "kan_Knda", "kas_Arab", "kas_Deva", "kat_Geor", "knc_Arab", "knc_Latn", "kaz_Cyrl", "kbp_Latn", "kea_Latn", "khm_Khmr", "kik_Latn", "kin_Latn", "kir_Cyrl", "kmb_Latn", "kon_Latn", "kor_Hang", "kmr_Latn", "lao_Laoo", "lvs_Latn", "lij_Latn", "lim_Latn", "lin_Latn", "lit_Latn", "lmo_Latn", "ltg_Latn", "ltz_Latn", "lua_Latn", "lug_Latn", "luo_Latn", "lus_Latn", "mag_Deva", "mai_Deva", "mal_Mlym", "mar_Deva", "min_Latn", "mkd_Cyrl", "plt_Latn", "mlt_Latn", "mni_Beng", "khk_Cyrl", "mos_Latn", "mri_Latn", "zsm_Latn", "mya_Mymr", "nld_Latn", "nno_Latn", "nob_Latn", "npi_Deva", "nso_Latn", "nus_Latn", "nya_Latn", "oci_Latn", "gaz_Latn", "ory_Orya", "pag_Latn", "pan_Guru", "pap_Latn", "pol_Latn", "por_Latn", "prs_Arab", "pbt_Arab", "quy_Latn", "ron_Latn", "run_Latn", "rus_Cyrl", "sag_Latn", "san_Deva", "sat_Beng", "scn_Latn", "shn_Mymr", "sin_Sinh", "slk_Latn", "slv_Latn", "smo_Latn", "sna_Latn", "snd_Arab", "som_Latn", "sot_Latn", "spa_Latn", "als_Latn", "srd_Latn", "srp_Cyrl", "ssw_Latn", "sun_Latn", "swe_Latn", "swh_Latn", "szl_Latn", "tam_Taml", "tat_Cyrl", "tel_Telu", "tgk_Cyrl", "tgl_Latn", "tha_Thai", "tir_Ethi", "taq_Latn", "taq_Tfng", "tpi_Latn", "tsn_Latn", "tso_Latn", "tuk_Latn", "tum_Latn", "tur_Latn", "twi_Latn", "tzm_Tfng", "uig_Arab", "ukr_Cyrl", "umb_Latn", "urd_Arab", "uzn_Latn", "vec_Latn", "vie_Latn", "war_Latn", "wol_Latn", "xho_Latn", "ydd_Hebr", "yor_Latn", "yue_Hant", "zho_Hans", "zho_Hant", "zul_Latn"]
# fmt: on
class NllbTokenizer(PreTrainedTokenizer):
    """
    NLLB tokenizer, based on SentencePiece, with language-code prefix/suffix handling.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]

    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []

    def __init__(
        self,
        vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        tokenizer_file=None,
        src_lang=None,
        tgt_lang=None,
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        additional_special_tokens=None,
        legacy_behaviour=False,
        **kwargs,
    ):
        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        self.legacy_behaviour = legacy_behaviour

        super().__init__(
            bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, cls_token=cls_token, pad_token=pad_token, mask_token=mask_token, tokenizer_file=tokenizer_file, src_lang=src_lang, tgt_lang=tgt_lang, additional_special_tokens=additional_special_tokens, sp_model_kwargs=self.sp_model_kwargs, legacy_behaviour=legacy_behaviour, **kwargs,
        )

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.vocab_file = vocab_file
        # Original fairseq vocab and spm vocab must be "aligned":
        # Vocab    |    0    |    1    |   2    |    3    |  4   |  5   |  6   |   7  |  8   |  9
        # -------- | ------- | ------- | ------ | ------- | ---- | ---- | ---- | ---- | ---- | ----
        # fairseq  | '<s>'   | '<pad>' | '</s>' | '<unk>' | 'an' | '▁n' | '▁m' | '▁t' | '▁k' | '▁a'
        # spm      | '<unk>' | '<s>'   | '</s>' | 'an'    | '▁n' | '▁m' | '▁t' | '▁k' | '▁a' | '▁s'

        # Mimic fairseq token-to-id alignment for the first 4 tokens
        self.fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}

        # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
        self.fairseq_offset = 1
        self.sp_model_size = len(self.sp_model)
        self.lang_code_to_id = {
            code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(FAIRSEQ_LANGUAGE_CODES)
        }
        self.id_to_lang_code = {v: k for k, v in self.lang_code_to_id.items()}
        self.fairseq_tokens_to_ids["<mask>"] = len(self.sp_model) + len(self.lang_code_to_id) + self.fairseq_offset

        self.fairseq_tokens_to_ids.update(self.lang_code_to_id)
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
        self._additional_special_tokens = list(self.lang_code_to_id.keys())

        if additional_special_tokens is not None:
            # Only add those special tokens if they are not already there.
            self._additional_special_tokens.extend(
                [t for t in additional_special_tokens if t not in self._additional_special_tokens]
            )

        self._src_lang = src_lang if src_lang is not None else "eng_Latn"
        self.cur_lang_code_id = self.lang_code_to_id[self._src_lang]
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self._src_lang)

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        state["sp_model_proto"] = self.sp_model.serialized_model_proto()
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto)

    @property
    def vocab_size(self):
        return len(self.sp_model) + len(self.lang_code_to_id) + self.fairseq_offset + 1  # Plus 1 for the mask token

    @property
    def src_lang(self) -> str:
        return self._src_lang

    @src_lang.setter
    def src_lang(self, new_src_lang: str) -> None:
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang)

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        prefix_ones = [1] * len(self.prefix_tokens)
        suffix_ones = [1] * len(self.suffix_tokens)
        if token_ids_1 is None:
            return prefix_ones + ([0] * len(token_ids_0)) + suffix_ones
        return prefix_ones + ([0] * len(token_ids_0)) + ([0] * len(token_ids_1)) + suffix_ones

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def _build_translation_inputs(
        self, raw_inputs, return_tensors: str, src_lang: Optional[str], tgt_lang: Optional[str], **extra_kwargs
    ):
        """Used by the translation pipeline to prepare inputs for generate()."""
        if src_lang is None or tgt_lang is None:
            raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model")
        self.src_lang = src_lang
        inputs = self(raw_inputs, add_special_tokens=True, return_tensors=return_tensors, **extra_kwargs)
        tgt_lang_id = self.convert_tokens_to_ids(tgt_lang)
        inputs["forced_bos_token_id"] = tgt_lang_id
        return inputs

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token)
        # Need to return unknown token if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset)

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (strings) to a single string."""
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)

    def prepare_seq2seq_batch(
        self,
        src_texts: List[str],
        src_lang: str = "eng_Latn",
        tgt_texts: Optional[List[str]] = None,
        tgt_lang: str = "fra_Latn",
        **kwargs,
    ) -> BatchEncoding:
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs)

    def _switch_to_input_mode(self):
        return self.set_src_lang_special_tokens(self.src_lang)

    def _switch_to_target_mode(self):
        return self.set_tgt_lang_special_tokens(self.tgt_lang)

    def set_src_lang_special_tokens(self, src_lang) -> None:
        """Reset the special tokens to the source lang setting.
        - In legacy mode: no prefix, suffix = [eos, src_lang_code].
        - In default mode: prefix = [src_lang_code], suffix = [eos].
        """
        self.cur_lang_code = self.lang_code_to_id[src_lang]
        if self.legacy_behaviour:
            self.prefix_tokens = []
            self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        else:
            self.prefix_tokens = [self.cur_lang_code]
            self.suffix_tokens = [self.eos_token_id]

    def set_tgt_lang_special_tokens(self, lang: str) -> None:
        """Reset the special tokens to the target lang setting.
        - In legacy mode: no prefix, suffix = [eos, tgt_lang_code].
        - In default mode: prefix = [tgt_lang_code], suffix = [eos].
        """
        self.cur_lang_code = self.lang_code_to_id[lang]
        if self.legacy_behaviour:
            self.prefix_tokens = []
            self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        else:
            self.prefix_tokens = [self.cur_lang_code]
            self.suffix_tokens = [self.eos_token_id]
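

# Usage sketch (added for illustration; checkpoint name taken from the pretrained map above):
#
#   tokenizer = NllbTokenizer.from_pretrained(
#       "facebook/nllb-200-distilled-600M", src_lang="eng_Latn", tgt_lang="fra_Latn"
#   )
#   enc = tokenizer("Hello world", return_tensors="pt")
#   # In the default (non-legacy) mode, input_ids are [src_lang_code] ... [eos],
#   # per set_src_lang_special_tokens above.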
from __future__ import annotations
import inspect
import unittest
from math import floor
import numpy as np
from transformers import CvtConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFCvtForImageClassification, TFCvtModel
from transformers.models.cvt.modeling_tf_cvt import TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class lowerCAmelCase ( __a ):
'''simple docstring'''
def lowerCAmelCase ( self : Optional[Any] ) -> Tuple:
"""simple docstring"""
__lowercase : str = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(__a , """embed_dim""" ) )
self.parent.assertTrue(hasattr(__a , """num_heads""" ) )
class TFCvtModelTester :
'''simple docstring'''
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=64,
        num_channels=3,
        embed_dim=[16, 48, 96],
        num_heads=[1, 3, 6],
        depth=[1, 2, 10],
        patch_sizes=[7, 3, 3],
        patch_stride=[4, 2, 2],
        patch_padding=[2, 1, 1],
        stride_kv=[2, 2, 2],
        cls_token=[False, False, True],
        attention_drop_rate=[0.0, 0.0, 0.0],
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        is_training=True,
        use_labels=True,
        num_labels=2,
    ):
        """simple docstring"""
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_sizes = patch_sizes
        self.patch_stride = patch_stride
        self.patch_padding = patch_padding
        self.is_training = is_training
        self.use_labels = use_labels
        self.num_labels = num_labels
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.num_heads = num_heads
        self.stride_kv = stride_kv
        self.depth = depth
        self.cls_token = cls_token
        self.attention_drop_rate = attention_drop_rate
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
def lowerCAmelCase ( self : Any ) -> Any:
"""simple docstring"""
__lowercase : str = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__lowercase : str = None
if self.use_labels:
# create a random int32 tensor of given shape
__lowercase : Dict = ids_tensor([self.batch_size] , self.num_labels )
__lowercase : Any = self.get_config()
return config, pixel_values, labels
def lowerCAmelCase ( self : str ) -> Optional[int]:
"""simple docstring"""
        return CvtConfig(
            image_size=self.image_size,
            num_labels=self.num_labels,
            num_channels=self.num_channels,
            embed_dim=self.embed_dim,
            num_heads=self.num_heads,
            patch_sizes=self.patch_sizes,
            patch_padding=self.patch_padding,
            patch_stride=self.patch_stride,
            stride_kv=self.stride_kv,
            depth=self.depth,
            cls_token=self.cls_token,
            attention_drop_rate=self.attention_drop_rate,
            initializer_range=self.initializer_range,
        )
def lowerCAmelCase ( self : Dict , __a : Any , __a : List[str] , __a : Union[str, Any] ) -> Optional[int]:
"""simple docstring"""
__lowercase : Tuple = TFCvtModel(config=__a )
__lowercase : str = model(__a , training=__a )
__lowercase : Dict = (self.image_size, self.image_size)
__lowercase , __lowercase : Optional[int] = image_size[0], image_size[1]
for i in range(len(self.depth ) ):
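            # per-stage convolutional embedding output size:
            #   floor((size + 2 * padding - kernel) / stride) + 1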
__lowercase : Dict = floor(((height + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1 )
__lowercase : Optional[Any] = floor(((width + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1 )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.embed_dim[-1], height, width) )
def lowerCAmelCase ( self : int , __a : List[str] , __a : Optional[int] , __a : int ) -> List[Any]:
"""simple docstring"""
__lowercase : Union[str, Any] = self.num_labels
__lowercase : Optional[int] = TFCvtForImageClassification(__a )
__lowercase : Optional[Any] = model(__a , labels=__a , training=__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowerCAmelCase ( self : List[str] ) -> str:
"""simple docstring"""
__lowercase : Any = self.prepare_config_and_inputs()
__lowercase , __lowercase , __lowercase : Optional[Any] = config_and_inputs
__lowercase : str = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_tf
class TFCvtModelTest ( TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
'''simple docstring'''
_A : Dict = (TFCvtModel, TFCvtForImageClassification) if is_tf_available() else ()
_A : Dict = (
{'''feature-extraction''': TFCvtModel, '''image-classification''': TFCvtForImageClassification}
if is_tf_available()
else {}
)
_A : int = False
_A : Any = False
_A : List[str] = False
_A : Optional[Any] = False
_A : Tuple = False
def lowerCAmelCase ( self : Union[str, Any] ) -> str:
"""simple docstring"""
__lowercase : Optional[Any] = TFCvtModelTester(self )
__lowercase : str = TFCvtConfigTester(self , config_class=__a , has_text_modality=__a , hidden_size=37 )
def lowerCAmelCase ( self : Dict ) -> Any:
"""simple docstring"""
self.config_tester.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
@unittest.skip(reason="""Cvt does not output attentions""" )
def lowerCAmelCase ( self : Union[str, Any] ) -> str:
"""simple docstring"""
pass
@unittest.skip(reason="""Cvt does not use inputs_embeds""" )
def lowerCAmelCase ( self : List[str] ) -> Union[str, Any]:
"""simple docstring"""
pass
@unittest.skip(reason="""Cvt does not support input and output embeddings""" )
def lowerCAmelCase ( self : Tuple ) -> Optional[Any]:
"""simple docstring"""
pass
@unittest.skipIf(
not is_tf_available() or len(tf.config.list_physical_devices("""GPU""" ) ) == 0 , reason="""TF does not support backprop for grouped convolutions on CPU.""" , )
def lowerCAmelCase ( self : Tuple ) -> Optional[int]:
"""simple docstring"""
super().test_dataset_conversion()
@unittest.skipIf(
not is_tf_available() or len(tf.config.list_physical_devices("""GPU""" ) ) == 0 , reason="""TF does not support backprop for grouped convolutions on CPU.""" , )
@slow
def lowerCAmelCase ( self : List[Any] ) -> Any:
"""simple docstring"""
super().test_keras_fit()
@unittest.skip(reason="""Get `Failed to determine best cudnn convolution algo.` error after using TF 2.12+cuda 11.8""" )
def lowerCAmelCase ( self : Tuple ) -> Any:
"""simple docstring"""
__lowercase : List[str] = tf.keras.mixed_precision.Policy("""mixed_float16""" )
tf.keras.mixed_precision.set_global_policy(__a )
super().test_keras_fit()
tf.keras.mixed_precision.set_global_policy("""float32""" )
def lowerCAmelCase ( self : Dict ) -> Any:
"""simple docstring"""
__lowercase , __lowercase : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowercase : Tuple = model_class(__a )
__lowercase : Union[str, Any] = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__lowercase : Union[str, Any] = [*signature.parameters.keys()]
__lowercase : List[Any] = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , __a )
def lowerCAmelCase ( self : Dict ) -> Union[str, Any]:
"""simple docstring"""
def check_hidden_states_output(__a : Tuple , __a : int , __a : List[Any] ):
__lowercase : Optional[Any] = model_class(__a )
__lowercase : str = model(**self._prepare_for_class(__a , __a ) )
__lowercase : str = outputs.hidden_states
__lowercase : int = len(self.model_tester.depth )
self.assertEqual(len(__a ) , __a )
# verify the first hidden states (first block)
self.assertListEqual(
list(hidden_states[0].shape[-3:] ) , [
self.model_tester.embed_dim[0],
self.model_tester.image_size // 4,
self.model_tester.image_size // 4,
] , )
__lowercase , __lowercase : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowercase : List[str] = True
check_hidden_states_output(__a , __a , __a )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__lowercase : List[Any] = True
check_hidden_states_output(__a , __a , __a )
def lowerCAmelCase ( self : Any ) -> List[Any]:
"""simple docstring"""
__lowercase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__a )
def lowerCAmelCase ( self : Any ) -> Tuple:
"""simple docstring"""
__lowercase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__a )
@slow
def lowerCAmelCase ( self : Optional[int] ) -> int:
"""simple docstring"""
for model_name in TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowercase : List[str] = TFCvtModel.from_pretrained(__a )
self.assertIsNotNone(__a )
def snake_case_ ( ):
__lowercase : Any = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_tf
@require_vision
class TFCvtModelIntegrationTest ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def lowerCAmelCase ( self : str ) -> int:
"""simple docstring"""
return AutoImageProcessor.from_pretrained(TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
@slow
def lowerCAmelCase ( self : Any ) -> Optional[int]:
"""simple docstring"""
__lowercase : int = TFCvtForImageClassification.from_pretrained(TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
__lowercase : Any = self.default_image_processor
__lowercase : Optional[int] = prepare_img()
__lowercase : Optional[Any] = image_processor(images=__a , return_tensors="""tf""" )
# forward pass
__lowercase : List[str] = model(**__a )
# verify the logits
__lowercase : str = tf.TensorShape((1, 1000) )
self.assertEqual(outputs.logits.shape , __a )
__lowercase : Dict = tf.constant([0.9285, 0.9015, -0.3150] )
self.assertTrue(np.allclose(outputs.logits[0, :3].numpy() , __a , atol=1E-4 ) )
| 649
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
lowerCamelCase : int = {
'''configuration_funnel''': ['''FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''FunnelConfig'''],
'''convert_funnel_original_tf_checkpoint_to_pytorch''': [],
'''tokenization_funnel''': ['''FunnelTokenizer'''],
}
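# Each guarded block below adds symbols to the import structure only when the
# corresponding backend (tokenizers, torch, tf) is importable; otherwise the
# names are simply left out of the lazy module.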
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase : List[str] = ['''FunnelTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase : Union[str, Any] = [
'''FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''FunnelBaseModel''',
'''FunnelForMaskedLM''',
'''FunnelForMultipleChoice''',
'''FunnelForPreTraining''',
'''FunnelForQuestionAnswering''',
'''FunnelForSequenceClassification''',
'''FunnelForTokenClassification''',
'''FunnelModel''',
'''FunnelPreTrainedModel''',
'''load_tf_weights_in_funnel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase : str = [
'''TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFFunnelBaseModel''',
'''TFFunnelForMaskedLM''',
'''TFFunnelForMultipleChoice''',
'''TFFunnelForPreTraining''',
'''TFFunnelForQuestionAnswering''',
'''TFFunnelForSequenceClassification''',
'''TFFunnelForTokenClassification''',
'''TFFunnelModel''',
'''TFFunnelPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_funnel import FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP, FunnelConfig
from .tokenization_funnel import FunnelTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_funnel_fast import FunnelTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_funnel import (
FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
FunnelBaseModel,
FunnelForMaskedLM,
FunnelForMultipleChoice,
FunnelForPreTraining,
FunnelForQuestionAnswering,
FunnelForSequenceClassification,
FunnelForTokenClassification,
FunnelModel,
FunnelPreTrainedModel,
load_tf_weights_in_funnel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_funnel import (
TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFFunnelBaseModel,
TFFunnelForMaskedLM,
TFFunnelForMultipleChoice,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForSequenceClassification,
TFFunnelForTokenClassification,
TFFunnelModel,
TFFunnelPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 649
| 1
|
'''simple docstring'''
def prefix_function(input_string: str) -> list:
    """Knuth-Morris-Pratt prefix function: for each position, the length of the
    longest proper prefix of the string that is also a suffix ending there."""
    prefix_result = [0] * len(input_string)
    for i in range(1, len(input_string)):
        # use last results for better performance - dynamic programming
        j = prefix_result[i - 1]
        while j > 0 and input_string[i] != input_string[j]:
            j = prefix_result[j - 1]
        if input_string[i] == input_string[j]:
            j += 1
        prefix_result[i] = j
    return prefix_result


def longest_prefix(input_string: str) -> int:
    """Length of the longest prefix that reoccurs as a suffix in the string."""
    return max(prefix_function(input_string))
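# Hand-checked example for the two helpers above:
#   prefix_function("aabcdaabc") -> [0, 1, 0, 0, 0, 1, 2, 3, 4]
#   longest_prefix("aabcdaabc") -> 4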
if __name__ == "__main__":
import doctest
doctest.testmod()
| 638
|
from pathlib import Path

import cv2  # the dump's "cva" can only plausibly be OpenCV's cv2
import numpy as np
from matplotlib import pyplot as plt


def get_rotation(img: np.ndarray, pts1: np.ndarray, pts2: np.ndarray, rows: int, cols: int) -> np.ndarray:
    """Warp ``img`` with the affine transform that maps ``pts1`` onto ``pts2``."""
    matrix = cv2.getAffineTransform(pts1, pts2)
    return cv2.warpAffine(img, matrix, (rows, cols))


if __name__ == "__main__":
    # read original image
    image = cv2.imread(
        str(Path(__file__).resolve().parent.parent / "image_data" / "lena.jpg")
    )
    # turn image in gray scale value
    gray_img = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    # get image shape
    img_rows, img_cols = gray_img.shape
    # set different points to rotate image (src/dst pairings below are illustrative)
    pts1 = np.array([[50, 50], [200, 50], [50, 200]], np.float32)
    pts2 = np.array([[10, 100], [200, 50], [100, 250]], np.float32)
    pts3 = np.array([[50, 50], [150, 50], [120, 200]], np.float32)
    pts4 = np.array([[10, 100], [80, 50], [180, 250]], np.float32)
    # add all rotated images in a list
    images = [
        gray_img,
        get_rotation(gray_img, pts1, pts2, img_rows, img_cols),
        get_rotation(gray_img, pts2, pts3, img_rows, img_cols),
        get_rotation(gray_img, pts2, pts4, img_rows, img_cols),
    ]
    # plot different image rotations
    fig = plt.figure(1)
    titles = ["Original", "Rotation 1", "Rotation 2", "Rotation 3"]
    for i, img in enumerate(images):
        plt.subplot(2, 2, i + 1), plt.imshow(img, "gray")
        plt.title(titles[i])
        plt.axis("off")
    plt.subplots_adjust(left=0.0, bottom=0.05, right=1.0, top=0.95)
    plt.show()
| 652
| 0
|
'''simple docstring'''
def min_path_sum(grid: list) -> int:
    """Minimum sum of a top-left to bottom-right path, moving only right or down."""
    if not grid or not grid[0]:
        raise TypeError("""The grid does not contain the appropriate information""" )

    for cell_n in range(1 , len(grid[0] ) ):
        grid[0][cell_n] += grid[0][cell_n - 1]
    row_above = grid[0]

    for row_n in range(1 , len(grid ) ):
        current_row = grid[row_n]
        grid[row_n] = fill_row(current_row , row_above )
        row_above = grid[row_n]

    return grid[-1][-1]


def fill_row(current_row: list , row_above: list ) -> list:
    current_row[0] += row_above[0]
    for cell_n in range(1 , len(current_row ) ):
        current_row[cell_n] += min(current_row[cell_n - 1] , row_above[cell_n] )
    return current_row
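# Hand-checked example:
#   min_path_sum([[1, 3, 1], [1, 5, 1], [4, 2, 1]]) -> 7   (path 1 -> 3 -> 1 -> 1 -> 1)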
if __name__ == "__main__":
import doctest
doctest.testmod()
| 704
|
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_flava import FlavaImageProcessor
__UpperCAmelCase = logging.get_logger(__name__)
class UpperCamelCase__ ( lowercase_ ):
"""simple docstring"""
def __init__( self : Dict , *lowerCamelCase_ : List[str] , **lowerCamelCase_ : Dict ):
'''simple docstring'''
warnings.warn(
"""The class FlavaFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"""
""" use FlavaImageProcessor instead.""" , lowerCamelCase_ , )
super().__init__(*lowerCamelCase_ , **lowerCamelCase_ )
| 79
| 0
|
"""simple docstring"""
from __future__ import annotations
from collections import namedtuple
def electric_power(voltage: float , current: float , power: float ):
    """Solve P = V * I: exactly one of the three arguments must be 0, and that
    quantity is computed from the other two."""
    result = namedtuple('''result''' , '''name value''' )
    if (voltage, current, power).count(0 ) != 1:
        raise ValueError('''Only one argument must be 0''' )
    elif power < 0:
        raise ValueError(
            '''Power cannot be negative in any electrical/electronics system''' )
    elif voltage == 0:
        return result('''voltage''' , power / current )
    elif current == 0:
        return result('''current''' , power / voltage )
    elif power == 0:
        return result('''power''' , float(round(abs(voltage * current ) , 2 ) ) )
    else:
        raise ValueError('''Exactly one argument must be 0''' )
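# e.g. electric_power(voltage=0, current=2, power=5) -> result(name='voltage', value=2.5)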
if __name__ == "__main__":
import doctest
doctest.testmod()
| 674
|
"""simple docstring"""
from collections.abc import Callable
import numpy as np
def euler_modified(
    ode_func: Callable , y0: float , x0: float , step_size: float , x_end: float
) -> np.ndarray:
    """Modified Euler (Heun) method: a forward-Euler predictor followed by a
    trapezoidal corrector step for y' = ode_func(x, y)."""
    n = int(np.ceil((x_end - x0) / step_size ) )
    y = np.zeros((n + 1,) )
    y[0] = y0
    x = x0
    for k in range(n ):
        y_predict = y[k] + step_size * ode_func(x , y[k] )
        y[k + 1] = y[k] + (
            (step_size / 2) * (ode_func(x , y[k] ) + ode_func(x + step_size , y_predict ))
        )
        x += step_size
    return y
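# Illustrative use: y' = y with y(0) = 1, step 0.1 up to x = 1:
#   euler_modified(lambda x, y: y, 1.0, 0.0, 0.1, 1.0)[-1] ~ 2.714 (exact: e ~ 2.71828)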
if __name__ == "__main__":
import doctest
doctest.testmod()
| 674
| 1
|
"""simple docstring"""
def prime_sieve_eratosthenes(num: int) -> list[int]:
    """Sieve of Eratosthenes: all primes up to and including num."""
    if num <= 0:
        raise ValueError('Input must be a positive integer' )
    primes = [True] * (num + 1)
    p = 2
    while p * p <= num:
        if primes[p]:
            # mark every multiple of p, starting at p*p, as composite
            for i in range(p * p , num + 1 , p ):
                primes[i] = False
        p += 1
    return [prime for prime in range(2 , num + 1 ) if primes[prime]]
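# e.g. prime_sieve_eratosthenes(10) -> [2, 3, 5, 7]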
if __name__ == "__main__":
import doctest
doctest.testmod()
    user_num = int(input('Enter a positive integer: ').strip())
print(prime_sieve_eratosthenes(user_num))
| 714
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A = logging.get_logger(__name__)
A = {
'edbeeching/decision-transformer-gym-hopper-medium': (
'https://huggingface.co/edbeeching/decision-transformer-gym-hopper-medium/resolve/main/config.json'
),
# See all DecisionTransformer models at https://huggingface.co/models?filter=decision_transformer
}
class UpperCAmelCase__ ( PretrainedConfig ):
    model_type = """decision_transformer"""
    keys_to_ignore_at_inference = ["""past_key_values"""]
    attribute_map = {
        """max_position_embeddings""": """n_positions""",
        """num_attention_heads""": """n_head""",
        """num_hidden_layers""": """n_layer""",
    }
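    # attribute_map translates the generic config attribute names onto this
    # model's GPT-2 style hyper-parameter names (n_positions, n_head, n_layer).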
    def __init__(
        self,
        state_dim=17,
        act_dim=4,
        hidden_size=128,
        max_ep_len=4096,
        action_tanh=True,
        vocab_size=1,
        n_positions=1024,
        n_layer=3,
        n_head=1,
        n_inner=None,
        activation_function="relu",
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        scale_attn_weights=True,
        use_cache=True,
        bos_token_id=50_256,
        eos_token_id=50_256,
        scale_attn_by_inverse_layer_idx=False,
        reorder_and_upcast_attn=False,
        **kwargs,
    ):
        '''simple docstring'''
        self.state_dim = state_dim
        self.act_dim = act_dim
        self.hidden_size = hidden_size
        self.max_ep_len = max_ep_len
        self.action_tanh = action_tanh
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.scale_attn_weights = scale_attn_weights
        self.use_cache = use_cache
        self.scale_attn_by_inverse_layer_idx = scale_attn_by_inverse_layer_idx
        self.reorder_and_upcast_attn = reorder_and_upcast_attn
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        super().__init__(bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
| 109
| 0
|
from __future__ import annotations
from fractions import Fraction
from math import gcd, sqrt
def is_sq(number: int) -> bool:
    sq: int = int(number**0.5 )
    return number == sq * sq


def add_three(
    x_num: int , x_den: int , y_num: int , y_den: int , z_num: int , z_den: int
) -> tuple[int, int]:
    top: int = x_num * y_den * z_den + y_num * x_den * z_den + z_num * x_den * y_den
    bottom: int = x_den * y_den * z_den
    hcf: int = gcd(top , bottom )
    top //= hcf
    bottom //= hcf
    return top, bottom
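# e.g. add_three(1, 2, 1, 3, 1, 6) reduces 1/2 + 1/3 + 1/6 = 36/36 to (1, 1)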
def solution(order: int = 3_5 ) -> int:
    unique_s: set = set()
    hcf: int
    total: Fraction = Fraction(0 )
    fraction_sum: tuple[int, int]
    for x_num in range(1 , order + 1 ):
        for x_den in range(x_num + 1 , order + 1 ):
            for y_num in range(1 , order + 1 ):
                for y_den in range(y_num + 1 , order + 1 ):
                    # n=1
                    z_num = x_num * y_den + x_den * y_num
                    z_den = x_den * y_den
                    hcf = gcd(z_num , z_den )
                    z_num //= hcf
                    z_den //= hcf
                    if 0 < z_num < z_den <= order:
                        fraction_sum = add_three(
                            x_num , x_den , y_num , y_den , z_num , z_den )
                        unique_s.add(fraction_sum )
                    # n=2
                    z_num = (
                        x_num * x_num * y_den * y_den + x_den * x_den * y_num * y_num
                    )
                    z_den = x_den * x_den * y_den * y_den
                    if is_sq(z_num ) and is_sq(z_den ):
                        z_num = int(sqrt(z_num ) )
                        z_den = int(sqrt(z_den ) )
                        hcf = gcd(z_num , z_den )
                        z_num //= hcf
                        z_den //= hcf
                        if 0 < z_num < z_den <= order:
                            fraction_sum = add_three(
                                x_num , x_den , y_num , y_den , z_num , z_den )
                            unique_s.add(fraction_sum )
                    # n=-1
                    z_num = x_num * y_num
                    z_den = x_den * y_num + x_num * y_den
                    hcf = gcd(z_num , z_den )
                    z_num //= hcf
                    z_den //= hcf
                    if 0 < z_num < z_den <= order:
                        fraction_sum = add_three(
                            x_num , x_den , y_num , y_den , z_num , z_den )
                        unique_s.add(fraction_sum )
                    # n=-2
                    z_num = x_num * x_num * y_num * y_num
                    z_den = (
                        x_den * x_den * y_num * y_num + x_num * x_num * y_den * y_den
                    )
                    if is_sq(z_num ) and is_sq(z_den ):
                        z_num = int(sqrt(z_num ) )
                        z_den = int(sqrt(z_den ) )
                        hcf = gcd(z_num , z_den )
                        z_num //= hcf
                        z_den //= hcf
                        if 0 < z_num < z_den <= order:
                            fraction_sum = add_three(
                                x_num , x_den , y_num , y_den , z_num , z_den )
                            unique_s.add(fraction_sum )
    for num, den in unique_s:
        total += Fraction(num , den )
    return total.denominator + total.numerator
if __name__ == "__main__":
print(F'''{solution() = }''')
| 285
|
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, BlipaProcessor, BlipImageProcessor, GPTaTokenizer, PreTrainedTokenizerFast
@require_vision
class _lowerCamelCase ( unittest.TestCase ):
"""simple docstring"""
def __SCREAMING_SNAKE_CASE ( self ) -> int:
"""simple docstring"""
        self.tmpdirname = tempfile.mkdtemp()
        image_processor = BlipImageProcessor()
        tokenizer = GPTaTokenizer.from_pretrained('''hf-internal-testing/tiny-random-GPT2Model''' )
        processor = BlipaProcessor(image_processor , tokenizer )
        processor.save_pretrained(self.tmpdirname )
def __SCREAMING_SNAKE_CASE ( self , **__SCREAMING_SNAKE_CASE ) -> int:
"""simple docstring"""
return AutoProcessor.from_pretrained(self.tmpdirname , **__SCREAMING_SNAKE_CASE ).tokenizer
def __SCREAMING_SNAKE_CASE ( self , **__SCREAMING_SNAKE_CASE ) -> int:
"""simple docstring"""
return AutoProcessor.from_pretrained(self.tmpdirname , **__SCREAMING_SNAKE_CASE ).image_processor
def __SCREAMING_SNAKE_CASE ( self ) -> Tuple:
"""simple docstring"""
shutil.rmtree(self.tmpdirname )
def __SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
"""simple docstring"""
        image_inputs = [np.random.randint(2_5_5 , size=(3, 3_0, 4_0_0) , dtype=np.uint8 )]
        image_inputs = [Image.fromarray(np.moveaxis(x , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]:
"""simple docstring"""
UpperCamelCase__ : str = BlipaProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
UpperCamelCase__ : Any = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' )
UpperCamelCase__ : str = self.get_image_processor(do_normalize=__SCREAMING_SNAKE_CASE , padding_value=1.0 )
UpperCamelCase__ : Optional[Any] = BlipaProcessor.from_pretrained(
self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=__SCREAMING_SNAKE_CASE , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , __SCREAMING_SNAKE_CASE )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , __SCREAMING_SNAKE_CASE )
def __SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
"""simple docstring"""
UpperCamelCase__ : Dict = self.get_image_processor()
UpperCamelCase__ : Tuple = self.get_tokenizer()
UpperCamelCase__ : int = BlipaProcessor(tokenizer=__SCREAMING_SNAKE_CASE , image_processor=__SCREAMING_SNAKE_CASE )
UpperCamelCase__ : Union[str, Any] = self.prepare_image_inputs()
UpperCamelCase__ : Optional[Any] = image_processor(__SCREAMING_SNAKE_CASE , return_tensors='''np''' )
UpperCamelCase__ : List[Any] = processor(images=__SCREAMING_SNAKE_CASE , return_tensors='''np''' )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
def __SCREAMING_SNAKE_CASE ( self ) -> Dict:
"""simple docstring"""
UpperCamelCase__ : str = self.get_image_processor()
UpperCamelCase__ : Any = self.get_tokenizer()
UpperCamelCase__ : Tuple = BlipaProcessor(tokenizer=__SCREAMING_SNAKE_CASE , image_processor=__SCREAMING_SNAKE_CASE )
UpperCamelCase__ : List[str] = '''lower newer'''
UpperCamelCase__ : Tuple = processor(text=__SCREAMING_SNAKE_CASE )
UpperCamelCase__ : int = tokenizer(__SCREAMING_SNAKE_CASE , return_token_type_ids=__SCREAMING_SNAKE_CASE )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def __SCREAMING_SNAKE_CASE ( self ) -> Tuple:
"""simple docstring"""
UpperCamelCase__ : Any = self.get_image_processor()
UpperCamelCase__ : Optional[int] = self.get_tokenizer()
UpperCamelCase__ : Dict = BlipaProcessor(tokenizer=__SCREAMING_SNAKE_CASE , image_processor=__SCREAMING_SNAKE_CASE )
UpperCamelCase__ : Tuple = '''lower newer'''
UpperCamelCase__ : Any = self.prepare_image_inputs()
UpperCamelCase__ : Any = processor(text=__SCREAMING_SNAKE_CASE , images=__SCREAMING_SNAKE_CASE )
self.assertListEqual(list(inputs.keys() ) , ['''pixel_values''', '''input_ids''', '''attention_mask'''] )
# test if it raises when no input is passed
with pytest.raises(__SCREAMING_SNAKE_CASE ):
processor()
def __SCREAMING_SNAKE_CASE ( self ) -> int:
"""simple docstring"""
UpperCamelCase__ : List[str] = self.get_image_processor()
UpperCamelCase__ : Optional[int] = self.get_tokenizer()
UpperCamelCase__ : Tuple = BlipaProcessor(tokenizer=__SCREAMING_SNAKE_CASE , image_processor=__SCREAMING_SNAKE_CASE )
UpperCamelCase__ : List[Any] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
UpperCamelCase__ : Tuple = processor.batch_decode(__SCREAMING_SNAKE_CASE )
UpperCamelCase__ : List[str] = tokenizer.batch_decode(__SCREAMING_SNAKE_CASE )
self.assertListEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
def __SCREAMING_SNAKE_CASE ( self ) -> str:
"""simple docstring"""
UpperCamelCase__ : Optional[Any] = self.get_image_processor()
UpperCamelCase__ : Dict = self.get_tokenizer()
UpperCamelCase__ : int = BlipaProcessor(tokenizer=__SCREAMING_SNAKE_CASE , image_processor=__SCREAMING_SNAKE_CASE )
UpperCamelCase__ : List[Any] = '''lower newer'''
UpperCamelCase__ : List[str] = self.prepare_image_inputs()
UpperCamelCase__ : List[Any] = processor(text=__SCREAMING_SNAKE_CASE , images=__SCREAMING_SNAKE_CASE )
# For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask']
self.assertListEqual(list(inputs.keys() ) , ['''pixel_values''', '''input_ids''', '''attention_mask'''] )
| 285
| 1
|
"""simple docstring"""
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class UpperCamelCase_ (ProcessorMixin ):
    attributes = ['''image_processor''', '''tokenizer''']
    image_processor_class = '''AutoImageProcessor'''
    tokenizer_class = '''AutoTokenizer'''

    def __init__( self , image_processor , tokenizer ) -> None:
        super().__init__(image_processor , tokenizer )
        # attribute name assumed from the usual processor pattern
        self.current_processor = self.image_processor

    def __call__( self , text=None , images=None , return_tensors=None , **kwargs ):
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none." )
        if text is not None:
            encoding = self.tokenizer(text , return_tensors=return_tensors , **kwargs )
        if images is not None:
            image_features = self.image_processor(images , return_tensors=return_tensors , **kwargs )
        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features ) , tensor_type=return_tensors )
def _SCREAMING_SNAKE_CASE ( self : Optional[int] , *lowerCAmelCase_ : List[str] , **lowerCAmelCase_ : List[str] ) -> Optional[Any]:
return self.tokenizer.batch_decode(*lowerCAmelCase_ , **lowerCAmelCase_ )
def _SCREAMING_SNAKE_CASE ( self : Dict , *lowerCAmelCase_ : int , **lowerCAmelCase_ : Optional[int] ) -> str:
return self.tokenizer.decode(*lowerCAmelCase_ , **lowerCAmelCase_ )
@property
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> List[Any]:
return ["input_ids", "attention_mask", "pixel_values"]
| 463
|
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCamelCase_ = logging.get_logger(__name__)
lowerCamelCase_ = {
'''facebook/levit-128S''': '''https://huggingface.co/facebook/levit-128S/resolve/main/config.json''',
# See all LeViT models at https://huggingface.co/models?filter=levit
}
class LevitConfig (PretrainedConfig ):
    model_type = '''levit'''
    def __init__(
        self,
        image_size=224,
        num_channels=3,
        kernel_size=3,
        stride=2,
        padding=1,
        patch_size=16,
        hidden_sizes=[128, 256, 384],
        num_attention_heads=[4, 8, 12],
        depths=[4, 4, 4],
        key_dim=[16, 16, 16],
        drop_path_rate=0,
        mlp_ratio=[2, 2, 2],
        attention_ratio=[2, 2, 2],
        initializer_range=0.0_2,
        **kwargs,
    ):
        super().__init__(**kwargs )
        self.image_size = image_size
        self.num_channels = num_channels
        self.kernel_size = kernel_size
        self.stride = stride
        self.padding = padding
        self.hidden_sizes = hidden_sizes
        self.num_attention_heads = num_attention_heads
        self.depths = depths
        self.key_dim = key_dim
        self.drop_path_rate = drop_path_rate
        self.patch_size = patch_size
        self.attention_ratio = attention_ratio
        self.mlp_ratio = mlp_ratio
        self.initializer_range = initializer_range
        # attention down-sampling ("Subsample") operations inserted between stages
        self.down_ops = [
            ["Subsample", key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2],
            ["Subsample", key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2],
        ]
class LevitOnnxConfig (OnnxConfig ):
    torch_onnx_minimum_version = version.parse('''1.11''' )
@property
def _SCREAMING_SNAKE_CASE ( self : int ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
] )
@property
def _SCREAMING_SNAKE_CASE ( self : Dict ) -> float:
return 1e-4
| 463
| 1
|
'''simple docstring'''
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import KandinskyPipeline, KandinskyPriorPipeline
else:
from .pipeline_kandinsky import KandinskyPipeline
from .pipeline_kandinsky_imgaimg import KandinskyImgaImgPipeline
from .pipeline_kandinsky_inpaint import KandinskyInpaintPipeline
from .pipeline_kandinsky_prior import KandinskyPriorPipeline, KandinskyPriorPipelineOutput
from .text_encoder import MultilingualCLIP
| 275
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowerCAmelCase = logging.get_logger(__name__)
_lowerCAmelCase = {
"""microsoft/swinv2-tiny-patch4-window8-256""": (
"""https://huggingface.co/microsoft/swinv2-tiny-patch4-window8-256/resolve/main/config.json"""
),
}
class UpperCAmelCase__ ( PretrainedConfig ):
    model_type = '''swinv2'''

    attribute_map = {
        '''num_attention_heads''': '''num_heads''',
        '''num_hidden_layers''': '''num_layers''',
    }
    def __init__(
        self,
        image_size=224,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        depths=[2, 2, 6, 2],
        num_heads=[3, 6, 12, 24],
        window_size=7,
        mlp_ratio=4.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        initializer_range=0.02,
        layer_norm_eps=1E-5,
        encoder_stride=32,
        **kwargs,
    ):
        """simple docstring"""
        super().__init__(**kwargs )
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths )
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.encoder_stride = encoder_stride
        # we set the hidden_size attribute in order to make Swinv2 work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths ) - 1) )
        # attribute name assumed from upstream Swinv2; all-zero means unused
        self.pretrained_window_sizes = (0, 0, 0, 0)
| 137
| 0
|
"""simple docstring"""
def naive_cut_rod_recursive(n: int , prices: list ):
    """simple docstring"""
    _enforce_args(n , prices )
    if n == 0:
        return 0
    max_revue = float("""-inf""" )
    for i in range(1 , n + 1 ):
        max_revue = max(
            max_revue , prices[i - 1] + naive_cut_rod_recursive(n - i , prices ) )
    return max_revue


def top_down_cut_rod(n: int , prices: list ):
    """simple docstring"""
    _enforce_args(n , prices )
    max_rev = [float("""-inf""" ) for _ in range(n + 1 )]
    return _top_down_cut_rod_recursive(n , prices , max_rev )


def _top_down_cut_rod_recursive(n: int , prices: list , max_rev: list ):
    """simple docstring"""
    if max_rev[n] >= 0:
        return max_rev[n]
    elif n == 0:
        return 0
    else:
        max_revenue = float("""-inf""" )
        for i in range(1 , n + 1 ):
            max_revenue = max(
                max_revenue , prices[i - 1] + _top_down_cut_rod_recursive(n - i , prices , max_rev ) , )
        max_rev[n] = max_revenue
        return max_rev[n]


def bottom_up_cut_rod(n: int , prices: list ):
    """simple docstring"""
    _enforce_args(n , prices )
    # length(max_rev) = n + 1, to accommodate for the revenue obtainable from a rod of
    # length 0.
    max_rev = [float("""-inf""" ) for _ in range(n + 1 )]
    max_rev[0] = 0
    for i in range(1 , n + 1 ):
        max_revenue_i = max_rev[i]
        for j in range(1 , i + 1 ):
            max_revenue_i = max(max_revenue_i , prices[j - 1] + max_rev[i - j] )
        max_rev[i] = max_revenue_i
    return max_rev[n]
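# Small worked example: with prices [1, 5, 8, 9], bottom_up_cut_rod(4, prices) -> 10
# (two pieces of length 2, revenue 5 + 5).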
def _enforce_args(n: int , prices: list ):
    """simple docstring"""
    if n < 0:
        msg = f'''n must be greater than or equal to 0. Got n = {n}'''
        raise ValueError(msg )
    if n > len(prices ):
        msg = (
            """Each integral piece of rod must have a corresponding price. """
            f'''Got n = {n} but length of prices = {len(prices )}'''
        )
        raise ValueError(msg )


def main():
    """simple docstring"""
    prices = [6, 10, 12, 15, 20, 23]
    n = len(prices )
    # the best revenue comes from cutting the rod into 6 pieces, each
    # of length 1 resulting in a revenue of 6 * 6 = 36.
    expected_max_revenue = 36
    max_rev_top_down = top_down_cut_rod(n , prices )
    max_rev_bottom_up = bottom_up_cut_rod(n , prices )
    max_rev_naive = naive_cut_rod_recursive(n , prices )
    assert expected_max_revenue == max_rev_top_down
    assert max_rev_top_down == max_rev_bottom_up
    assert max_rev_bottom_up == max_rev_naive


if __name__ == "__main__":
    main()
| 442
|
"""simple docstring"""
import os
import shutil
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
from datasets import Dataset
from transformers.models.realm.configuration_realm import RealmConfig
from transformers.models.realm.retrieval_realm import _REALM_BLOCK_RECORDS_FILENAME, RealmRetriever
from transformers.models.realm.tokenization_realm import VOCAB_FILES_NAMES, RealmTokenizer
class lowerCamelCase__ ( _a ):
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ):
'''simple docstring'''
__lowercase = tempfile.mkdtemp()
__lowercase = 5
# Realm tok
__lowercase = [
"""[UNK]""",
"""[CLS]""",
"""[SEP]""",
"""[PAD]""",
"""[MASK]""",
"""test""",
"""question""",
"""this""",
"""is""",
"""the""",
"""first""",
"""second""",
"""third""",
"""fourth""",
"""fifth""",
"""record""",
"""want""",
"""##want""",
"""##ed""",
"""wa""",
"""un""",
"""runn""",
"""##ing""",
""",""",
"""low""",
"""lowest""",
]
__lowercase = os.path.join(self.tmpdirname , """realm_tokenizer""" )
os.makedirs(A_ , exist_ok=A_ )
__lowercase = os.path.join(A_ , VOCAB_FILES_NAMES["""vocab_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )
__lowercase = os.path.join(self.tmpdirname , """realm_block_records""" )
os.makedirs(A_ , exist_ok=A_ )
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ):
'''simple docstring'''
return RealmTokenizer.from_pretrained(os.path.join(self.tmpdirname , """realm_tokenizer""" ) )
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ):
'''simple docstring'''
shutil.rmtree(self.tmpdirname )
def SCREAMING_SNAKE_CASE_ ( self : int ):
'''simple docstring'''
__lowercase = RealmConfig(num_block_records=self.num_block_records )
return config
def SCREAMING_SNAKE_CASE_ ( self : Any ):
'''simple docstring'''
__lowercase = Dataset.from_dict(
{
"""id""": ["""0""", """1"""],
"""question""": ["""foo""", """bar"""],
"""answers""": [["""Foo""", """Bar"""], ["""Bar"""]],
} )
return dataset
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ):
'''simple docstring'''
__lowercase = np.array(
[
B"""This is the first record""",
B"""This is the second record""",
B"""This is the third record""",
B"""This is the fourth record""",
B"""This is the fifth record""",
B"""This is a longer longer longer record""",
] , dtype=A_ , )
return block_records
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ):
'''simple docstring'''
__lowercase = RealmRetriever(
block_records=self.get_dummy_block_records() , tokenizer=self.get_tokenizer() , )
return retriever
def SCREAMING_SNAKE_CASE_ ( self : Dict ):
'''simple docstring'''
__lowercase = self.get_config()
__lowercase = self.get_dummy_retriever()
__lowercase = retriever.tokenizer
__lowercase = np.array([0, 3] , dtype="""long""" )
__lowercase = tokenizer(["""Test question"""] ).input_ids
__lowercase = tokenizer(
["""the fourth"""] , add_special_tokens=A_ , return_token_type_ids=A_ , return_attention_mask=A_ , ).input_ids
__lowercase = config.reader_seq_len
__lowercase , __lowercase , __lowercase , __lowercase = retriever(
A_ , A_ , answer_ids=A_ , max_length=A_ , return_tensors="""np""" )
self.assertEqual(len(A_ ) , 2 )
self.assertEqual(len(A_ ) , 2 )
self.assertEqual(len(A_ ) , 2 )
self.assertEqual(concat_inputs.input_ids.shape , (2, 1_0) )
self.assertEqual(concat_inputs.attention_mask.shape , (2, 1_0) )
self.assertEqual(concat_inputs.token_type_ids.shape , (2, 1_0) )
self.assertEqual(concat_inputs.special_tokens_mask.shape , (2, 1_0) )
self.assertEqual(
tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[0] ) , ["""[CLS]""", """test""", """question""", """[SEP]""", """this""", """is""", """the""", """first""", """record""", """[SEP]"""] , )
self.assertEqual(
tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[1] ) , ["""[CLS]""", """test""", """question""", """[SEP]""", """this""", """is""", """the""", """fourth""", """record""", """[SEP]"""] , )
def SCREAMING_SNAKE_CASE_ ( self : str ):
'''simple docstring'''
__lowercase = self.get_config()
__lowercase = self.get_dummy_retriever()
__lowercase = retriever.tokenizer
__lowercase = np.array([0, 3, 5] , dtype="""long""" )
__lowercase = tokenizer(["""Test question"""] ).input_ids
__lowercase = tokenizer(
["""the fourth""", """longer longer"""] , add_special_tokens=A_ , return_token_type_ids=A_ , return_attention_mask=A_ , ).input_ids
__lowercase = config.reader_seq_len
__lowercase , __lowercase , __lowercase , __lowercase = retriever(
A_ , A_ , answer_ids=A_ , max_length=A_ , return_tensors="""np""" )
self.assertEqual([False, True, True] , A_ )
self.assertEqual([[-1, -1, -1], [6, -1, -1], [6, 7, 8]] , A_ )
self.assertEqual([[-1, -1, -1], [7, -1, -1], [7, 8, 9]] , A_ )
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ):
'''simple docstring'''
__lowercase = self.get_dummy_retriever()
retriever.save_pretrained(os.path.join(self.tmpdirname , """realm_block_records""" ) )
# Test local path
__lowercase = retriever.from_pretrained(os.path.join(self.tmpdirname , """realm_block_records""" ) )
self.assertEqual(retriever.block_records[0] , B"""This is the first record""" )
# Test mocked remote path
with patch("""transformers.models.realm.retrieval_realm.hf_hub_download""" ) as mock_hf_hub_download:
__lowercase = os.path.join(
os.path.join(self.tmpdirname , """realm_block_records""" ) , _REALM_BLOCK_RECORDS_FILENAME )
__lowercase = RealmRetriever.from_pretrained("""google/realm-cc-news-pretrained-openqa""" )
self.assertEqual(retriever.block_records[0] , B"""This is the first record""" )
| 442
| 1
|
import copy
from typing import Dict, List, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
a__ : Optional[int] = {
"""facebook/mask2former-swin-small-coco-instance""": (
"""https://huggingface.co/facebook/mask2former-swin-small-coco-instance/blob/main/config.json"""
)
# See all Mask2Former models at https://huggingface.co/models?filter=mask2former
}
a__ : Optional[int] = logging.get_logger(__name__)
class lowercase ( PretrainedConfig ):
    """simple docstring"""

    model_type = 'mask2former'
    backbones_supported = ['swin']
    attribute_map = {'hidden_size': 'hidden_dim'}
def __init__( self : str , a_ : Optional[Dict] = None , a_ : int = 2_56 , a_ : int = 2_56 , a_ : int = 2_56 , a_ : int = 10_24 , a_ : str = "relu" , a_ : int = 6 , a_ : int = 10 , a_ : int = 8 , a_ : float = 0.0 , a_ : int = 20_48 , a_ : bool = False , a_ : bool = False , a_ : int = 4 , a_ : int = 2_55 , a_ : int = 1_00 , a_ : float = 0.1 , a_ : float = 2.0 , a_ : float = 5.0 , a_ : float = 5.0 , a_ : int = 1_25_44 , a_ : float = 3.0 , a_ : float = 0.7_5 , a_ : float = 0.0_2 , a_ : float = 1.0 , a_ : bool = True , a_ : List[int] = [4, 8, 16, 32] , a_ : bool = None , **a_ : Optional[Any] , ):
"""simple docstring"""
        if backbone_config is None:
            logger.info("""`backbone_config` is `None`. Initializing the config with the default `Swin` backbone.""" )
            backbone_config = CONFIG_MAPPING["""swin"""](
                image_size=2_24 , in_channels=3 , patch_size=4 , embed_dim=96 , depths=[2, 2, 18, 2] , num_heads=[3, 6, 12, 24] , window_size=7 , drop_path_rate=0.3 , use_absolute_embeddings=False , out_features=["""stage1""", """stage2""", """stage3""", """stage4"""] , )
        if isinstance(backbone_config , dict ):
            backbone_model_type = backbone_config.pop("""model_type""" )
            config_class = CONFIG_MAPPING[backbone_model_type]
            backbone_config = config_class.from_dict(backbone_config )
# verify that the backbone is supported
if backbone_config.model_type not in self.backbones_supported:
logger.warning_once(
F'''Backbone {backbone_config.model_type} is not a supported model and may not be compatible with Mask2Former. '''
F'''Supported model types: {','.join(self.backbones_supported )}''' )
lowerCamelCase__ = backbone_config
lowerCamelCase__ = feature_size
lowerCamelCase__ = mask_feature_size
lowerCamelCase__ = hidden_dim
lowerCamelCase__ = encoder_feedforward_dim
lowerCamelCase__ = activation_function
lowerCamelCase__ = encoder_layers
lowerCamelCase__ = decoder_layers
lowerCamelCase__ = num_attention_heads
lowerCamelCase__ = dropout
lowerCamelCase__ = dim_feedforward
lowerCamelCase__ = pre_norm
lowerCamelCase__ = enforce_input_projection
lowerCamelCase__ = common_stride
lowerCamelCase__ = ignore_value
lowerCamelCase__ = num_queries
lowerCamelCase__ = no_object_weight
lowerCamelCase__ = class_weight
lowerCamelCase__ = mask_weight
lowerCamelCase__ = dice_weight
lowerCamelCase__ = train_num_points
lowerCamelCase__ = oversample_ratio
lowerCamelCase__ = importance_sample_ratio
lowerCamelCase__ = init_std
lowerCamelCase__ = init_xavier_std
lowerCamelCase__ = use_auxiliary_loss
lowerCamelCase__ = feature_strides
lowerCamelCase__ = output_auxiliary_logits
lowerCamelCase__ = decoder_layers
super().__init__(**a_ )
@classmethod
def _UpperCamelCase ( cls : Any , a_ : PretrainedConfig , **a_ : Dict ):
"""simple docstring"""
return cls(
backbone_config=a_ , **a_ , )
def _UpperCamelCase ( self : List[str] ):
"""simple docstring"""
lowerCamelCase__ = copy.deepcopy(self.__dict__ )
lowerCamelCase__ = self.backbone_config.to_dict()
lowerCamelCase__ = self.__class__.model_type
return output
| 165
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a__ : List[str] = logging.get_logger(__name__)
a__ : Tuple = {
"""facebook/s2t-wav2vec2-large-en-de""": (
"""https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/config.json"""
),
# See all Speech2Text models at https://huggingface.co/models?filter=speech2text2
}
class lowercase ( PretrainedConfig ):
    """simple docstring"""

    model_type = 'speech_to_text_2'
    keys_to_ignore_at_inference = ['past_key_values']
    attribute_map = {'num_attention_heads': 'decoder_attention_heads', 'hidden_size': 'd_model'}
    def __init__(
        self,
        vocab_size=1_00_00,
        decoder_layers=6,
        decoder_ffn_dim=20_48,
        decoder_attention_heads=4,
        decoder_layerdrop=0.0,
        use_cache=True,
        activation_function="relu",
        d_model=2_56,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.0_2,
        decoder_start_token_id=2,
        scale_embedding=True,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        max_target_positions=10_24,
        **kwargs,
    ):
        """simple docstring"""
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = decoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.max_target_positions = max_target_positions
        super().__init__(
            pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , decoder_start_token_id=decoder_start_token_id , **kwargs , )
| 165
| 1
|
'''simple docstring'''
import unittest
from dataclasses import dataclass
import pytest
from accelerate.commands.config.config_args import SageMakerConfig
from accelerate.utils import ComputeEnvironment
from accelerate.utils.launch import _convert_nargs_to_dict
@dataclass
class MockLaunchConfig ( SageMakerConfig ):
    # field names other than the two *_script_args lists (which the tests read)
    # are inferred from their values
    compute_environment = ComputeEnvironment.AMAZON_SAGEMAKER
    fpaa = True
    ec2_instance_type = "ml.p3.2xlarge"
    iam_role_name = "accelerate_sagemaker_execution_role"
    profile = "hf-sm"
    region = "us-east-1"
    num_machines = 1
    base_job_name = "accelerate-sagemaker-1"
    pytorch_version = "1.6"
    transformers_version = "4.4"
    training_script = "train.py"
    success_training_script_args = [
        "--model_name_or_path",
        "bert",
        "--do_train",
        "False",
        "--epochs",
        "3",
        "--learning_rate",
        "5e-5",
        "--max_steps",
        "50.5",
    ]
    fail_training_script_args = [
        "--model_name_or_path",
        "bert",
        "--do_train",
        "--do_test",
        "False",
        "--do_predict",
        "--epochs",
        "3",
        "--learning_rate",
        "5e-5",
        "--max_steps",
        "50.5",
    ]
class lowerCAmelCase__ ( unittest.TestCase ):
def __UpperCamelCase ( self : Union[str, Any] ) -> str:
"""simple docstring"""
        converted_args = _convert_nargs_to_dict(MockLaunchConfig.success_training_script_args )
        assert isinstance(converted_args['''model_name_or_path'''] , str )
        assert isinstance(converted_args['''do_train'''] , bool )
        assert isinstance(converted_args['''epochs'''] , int )
        assert isinstance(converted_args['''learning_rate'''] , float )
        assert isinstance(converted_args['''max_steps'''] , float )

        with pytest.raises(ValueError ):
            _convert_nargs_to_dict(MockLaunchConfig.fail_training_script_args )
| 418
|
'''simple docstring'''
import os
import re
import shutil
import sys
import tempfile
import unittest
import black
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, """utils"""))
import check_copies # noqa: E402
# This is the reference code that will be used in the tests.
# If DDPMSchedulerOutput is changed in scheduling_ddpm.py, this code needs to be manually updated.
__lowerCamelCase : Any = """ \"\"\"
Output class for the scheduler's step function output.
Args:
prev_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):
Computed sample (x_{t-1}) of previous timestep. `prev_sample` should be used as next model input in the
denoising loop.
pred_original_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):
The predicted denoised sample (x_{0}) based on the model output from the current timestep.
`pred_original_sample` can be used to preview progress or for guidance.
\"\"\"
prev_sample: torch.FloatTensor
pred_original_sample: Optional[torch.FloatTensor] = None
"""
class lowerCAmelCase__ ( unittest.TestCase ):
def __UpperCamelCase ( self : List[Any] ) -> Optional[int]:
"""simple docstring"""
        self.diffusers_dir = tempfile.mkdtemp()
        os.makedirs(os.path.join(self.diffusers_dir , '''schedulers/''' ) )
        check_copies.DIFFUSERS_PATH = self.diffusers_dir
        shutil.copy(
            os.path.join(git_repo_path , '''src/diffusers/schedulers/scheduling_ddpm.py''' ) , os.path.join(self.diffusers_dir , '''schedulers/scheduling_ddpm.py''' ) , )
def __UpperCamelCase ( self : List[str] ) -> Optional[Any]:
"""simple docstring"""
        check_copies.DIFFUSERS_PATH = '''src/diffusers'''
shutil.rmtree(self.diffusers_dir )
    def check_copy_consistency( self , comment , class_name , class_code , overwrite_result=None ):
        """simple docstring"""
        code = comment + F"""\nclass {class_name}(nn.Module):\n""" + class_code
        if overwrite_result is not None:
            expected = comment + F"""\nclass {class_name}(nn.Module):\n""" + overwrite_result
        # the black TargetVersion digits were lost in this copy; PY37 assumed
        mode = black.Mode(target_versions={black.TargetVersion.PY37} , line_length=119 )
        code = black.format_str(code , mode=mode )
        fname = os.path.join(self.diffusers_dir , '''new_code.py''' )
        with open(fname , '''w''' , newline='''\n''' ) as f:
            f.write(code )
        if overwrite_result is None:
            self.assertTrue(len(check_copies.is_copy_consistent(fname ) ) == 0 )
        else:
            check_copies.is_copy_consistent(f.name , overwrite=True )
            with open(fname , '''r''' ) as f:
                self.assertTrue(f.read() , expected )
def __UpperCamelCase ( self : List[str] ) -> Optional[Any]:
"""simple docstring"""
        code = check_copies.find_code_in_diffusers('''schedulers.scheduling_ddpm.DDPMSchedulerOutput''' )
        self.assertEqual(code , REFERENCE_CODE )
def __UpperCamelCase ( self : int ) -> int:
"""simple docstring"""
        self.check_copy_consistency(
            '''# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput''' , '''DDPMSchedulerOutput''' , REFERENCE_CODE + '''\n''' , )
        # With no empty line at the end
        self.check_copy_consistency(
            '''# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput''' , '''DDPMSchedulerOutput''' , REFERENCE_CODE , )
        # Copy consistency with rename
        self.check_copy_consistency(
            '''# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test''' , '''TestSchedulerOutput''' , re.sub('''DDPM''' , '''Test''' , REFERENCE_CODE ) , )
        # Copy consistency with a really long name
        long_class_name = '''TestClassWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason'''
        self.check_copy_consistency(
            F"""# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->{long_class_name}""" , F"""{long_class_name}SchedulerOutput""" , re.sub('''Bert''' , long_class_name , REFERENCE_CODE ) , )
        # Copy consistency with overwrite
        self.check_copy_consistency(
            '''# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test''' , '''TestSchedulerOutput''' , REFERENCE_CODE , overwrite_result=re.sub('''DDPM''' , '''Test''' , REFERENCE_CODE ) , )
| 418
| 1
|
"""simple docstring"""
# Imports
import numpy as np
class _UpperCAmelCase :
    def __init__( self , red=None , green=None , blue=None , red_edge=None , nir=None ):
        self.set_matricies(red=red , green=green , blue=blue , red_edge=red_edge , nir=nir )

    def set_matricies( self , red=None , green=None , blue=None , red_edge=None , nir=None ):
        if red is not None:
            self.red = red
        if green is not None:
            self.green = green
        if blue is not None:
            self.blue = blue
        if red_edge is not None:
            self.redEdge = red_edge
        if nir is not None:
            self.nir = nir
        return True

    def calculation( self , index="" , red=None , green=None , blue=None , red_edge=None , nir=None ):
        self.set_matricies(red=red , green=green , blue=blue , red_edge=red_edge , nir=nir )
        funcs = {
'''ARVI2''': self.arvaa,
'''CCCI''': self.ccci,
'''CVI''': self.cvi,
'''GLI''': self.gli,
'''NDVI''': self.ndvi,
'''BNDVI''': self.bndvi,
'''redEdgeNDVI''': self.red_edge_ndvi,
'''GNDVI''': self.gndvi,
'''GBNDVI''': self.gbndvi,
'''GRNDVI''': self.grndvi,
'''RBNDVI''': self.rbndvi,
'''PNDVI''': self.pndvi,
'''ATSAVI''': self.atsavi,
'''BWDRVI''': self.bwdrvi,
'''CIgreen''': self.ci_green,
'''CIrededge''': self.ci_rededge,
'''CI''': self.ci,
'''CTVI''': self.ctvi,
'''GDVI''': self.gdvi,
'''EVI''': self.evi,
'''GEMI''': self.gemi,
'''GOSAVI''': self.gosavi,
'''GSAVI''': self.gsavi,
'''Hue''': self.hue,
'''IVI''': self.ivi,
'''IPVI''': self.ipvi,
'''I''': self.i,
'''RVI''': self.rvi,
'''MRVI''': self.mrvi,
'''MSAVI''': self.m_savi,
'''NormG''': self.norm_g,
'''NormNIR''': self.norm_nir,
'''NormR''': self.norm_r,
'''NGRDI''': self.ngrdi,
'''RI''': self.ri,
'''S''': self.s,
'''IF''': self._if,
'''DVI''': self.dvi,
'''TVI''': self.tvi,
'''NDRE''': self.ndre,
}
try:
return funcs[index]()
except KeyError:
print('''Index not in the list!''' )
return False
def a ( self : Any ):
return -0.18 + (1.17 * ((self.nir - self.red) / (self.nir + self.red)))
def a ( self : List[str] ):
return ((self.nir - self.redEdge) / (self.nir + self.redEdge)) / (
(self.nir - self.red) / (self.nir + self.red)
)
def a ( self : List[Any] ):
return self.nir * (self.red / (self.green**2))
def a ( self : Optional[Any] ):
return (2 * self.green - self.red - self.blue) / (
2 * self.green + self.red + self.blue
)
def a ( self : Union[str, Any] ):
return (self.nir - self.red) / (self.nir + self.red)
def a ( self : Optional[int] ):
return (self.nir - self.blue) / (self.nir + self.blue)
def a ( self : Dict ):
return (self.redEdge - self.red) / (self.redEdge + self.red)
def a ( self : Union[str, Any] ):
return (self.nir - self.green) / (self.nir + self.green)
def a ( self : List[str] ):
return (self.nir - (self.green + self.blue)) / (
self.nir + (self.green + self.blue)
)
def a ( self : Tuple ):
return (self.nir - (self.green + self.red)) / (
self.nir + (self.green + self.red)
)
def a ( self : int ):
return (self.nir - (self.blue + self.red)) / (self.nir + (self.blue + self.red))
def a ( self : Optional[Any] ):
return (self.nir - (self.green + self.red + self.blue)) / (
self.nir + (self.green + self.red + self.blue)
)
def a ( self : int , _lowercase : Tuple=0.08 , _lowercase : str=1.22 , _lowercase : Dict=0.03 ):
return a * (
(self.nir - a * self.red - b)
/ (a * self.nir + self.red - a * b + x * (1 + a**2))
)
def a ( self : List[str] ):
return (0.1 * self.nir - self.blue) / (0.1 * self.nir + self.blue)
def a ( self : List[str] ):
return (self.nir / self.green) - 1
def a ( self : Optional[Any] ):
return (self.nir / self.redEdge) - 1
def a ( self : Dict ):
return (self.red - self.blue) / self.red
def a ( self : Dict ):
__UpperCAmelCase = self.ndvi()
return ((ndvi + 0.5) / (abs(ndvi + 0.5 ))) * (abs(ndvi + 0.5 ) ** (1 / 2))
def a ( self : Tuple ):
return self.nir - self.green
def a ( self : Union[str, Any] ):
return 2.5 * (
(self.nir - self.red) / (self.nir + 6 * self.red - 7.5 * self.blue + 1)
)
def a ( self : Tuple ):
__UpperCAmelCase = (2 * (self.nir**2 - self.red**2) + 1.5 * self.nir + 0.5 * self.red) / (
self.nir + self.red + 0.5
)
return n * (1 - 0.25 * n) - (self.red - 0.125) / (1 - self.red)
def a ( self : Optional[Any] , _lowercase : List[str]=0.16 ):
return (self.nir - self.green) / (self.nir + self.green + y)
def a ( self : int , _lowercase : Union[str, Any]=0.5 ):
return ((self.nir - self.green) / (self.nir + self.green + n)) * (1 + n)
def a ( self : List[str] ):
return np.arctan(
((2 * self.red - self.green - self.blue) / 30.5) * (self.green - self.blue) )
def a ( self : List[str] , _lowercase : Tuple=None , _lowercase : Any=None ):
return (self.nir - b) / (a * self.red)
def a ( self : Tuple ):
return (self.nir / ((self.nir + self.red) / 2)) * (self.ndvi() + 1)
def a ( self : str ):
return (self.red + self.green + self.blue) / 30.5
def a ( self : Optional[Any] ):
return self.nir / self.red
def a ( self : Optional[Any] ):
return (self.rvi() - 1) / (self.rvi() + 1)
def a ( self : Any ):
return (
(2 * self.nir + 1)
- ((2 * self.nir + 1) ** 2 - 8 * (self.nir - self.red)) ** (1 / 2)
) / 2
def a ( self : Union[str, Any] ):
return self.green / (self.nir + self.red + self.green)
def a ( self : List[Any] ):
return self.nir / (self.nir + self.red + self.green)
def a ( self : List[Any] ):
return self.red / (self.nir + self.red + self.green)
def a ( self : List[str] ):
return (self.green - self.red) / (self.green + self.red)
def a ( self : Any ):
return (self.red - self.green) / (self.red + self.green)
def a ( self : Any ):
__UpperCAmelCase = np.max([np.max(self.red ), np.max(self.green ), np.max(self.blue )] )
__UpperCAmelCase = np.min([np.min(self.red ), np.min(self.green ), np.min(self.blue )] )
return (max_value - min_value) / max_value
def a ( self : int ):
return (2 * self.red - self.green - self.blue) / (self.green - self.blue)
def a ( self : Tuple ):
return self.nir / self.red
def a ( self : List[Any] ):
return (self.ndvi() + 0.5) ** (1 / 2)
def a ( self : Tuple ):
return (self.nir - self.redEdge) / (self.nir + self.redEdge)
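
# A minimal usage sketch (added for illustration, not part of the original file):
# each band is a small float array; the values below are arbitrary assumptions.
if __name__ == "__main__":
    red = np.array([[26.0, 20.0], [25.0, 40.0]])
    green = np.array([[0.0, 28.0], [45.0, 34.0]])
    blue = np.array([[28.0, 6.0], [35.0, 25.0]])
    red_edge = np.array([[30.0, 35.0], [38.0, 44.0]])
    nir = np.array([[100.0, 101.0], [99.0, 98.0]])

    cl = IndexCalculation(red=red, green=green, blue=blue, red_edge=red_edge, nir=nir)
    print(cl.calculation("NDVI", red=red, nir=nir))  # elementwise (nir - red) / (nir + red)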
"""simple docstring"""
def speed_of_sound_in_a_fluid(density: float, bulk_modulus: float) -> float:
    """Newton-Laplace formula: c = sqrt(K / rho) for a fluid of density rho (kg/m^3)
    and bulk modulus K (Pa)."""
    if density <= 0:
        raise ValueError("Impossible fluid density")
    if bulk_modulus <= 0:
        raise ValueError("Impossible bulk modulus")

    return (bulk_modulus / density) ** 0.5
if __name__ == "__main__":
import doctest
doctest.testmod()
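    # Illustrative check (added): for water, assuming density ≈ 998 kg/m^3 and a
    # bulk modulus ≈ 2.15e9 Pa, the result is roughly 1468 m/s.
    print(speed_of_sound_in_a_fluid(density=998, bulk_modulus=2.15e9))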
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "sail/poolformer_s12": "https://huggingface.co/sail/poolformer_s12/resolve/main/config.json",
    # See all PoolFormer models at https://huggingface.co/models?filter=poolformer
}


class PoolFormerConfig(PretrainedConfig):
    model_type = "poolformer"

    def __init__(
        self,
        num_channels=3,
        patch_size=16,
        stride=16,
        pool_size=3,
        mlp_ratio=4.0,
        depths=[2, 2, 6, 2],
        hidden_sizes=[64, 128, 320, 512],
        patch_sizes=[7, 3, 3, 3],
        strides=[4, 2, 2, 2],
        padding=[2, 1, 1, 1],
        num_encoder_blocks=4,
        drop_path_rate=0.0,
        hidden_act="gelu",
        use_layer_scale=True,
        layer_scale_init_value=1e-5,
        initializer_range=0.02,
        **kwargs,
    ):
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.stride = stride
        self.padding = padding
        self.pool_size = pool_size
        self.hidden_sizes = hidden_sizes
        self.mlp_ratio = mlp_ratio
        self.depths = depths
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.num_encoder_blocks = num_encoder_blocks
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_layer_scale = use_layer_scale
        self.layer_scale_init_value = layer_scale_init_value
        self.initializer_range = initializer_range
        super().__init__(**kwargs)


class PoolFormerOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 2e-3
"""simple docstring"""
def valid_coloring(neighbours: list[int], colored_vertices: list[int], color: int) -> bool:
    # A color is valid for a vertex if no adjacent vertex already carries it
    return not any(
        neighbour == 1 and colored_vertices[i] == color
        for i, neighbour in enumerate(neighbours)
    )


def util_color(graph: list[list[int]], max_colors: int, colored_vertices: list[int], index: int) -> bool:
    # Base case: all vertices have been colored
    if index == len(graph):
        return True

    # Recursive Step
    for i in range(max_colors):
        if valid_coloring(graph[index], colored_vertices, i):
            # Color current vertex
            colored_vertices[index] = i
            # Validate coloring
            if util_color(graph, max_colors, colored_vertices, index + 1):
                return True
            # Backtrack
            colored_vertices[index] = -1
    return False


def color(graph: list[list[int]], max_colors: int) -> list[int]:
    colored_vertices = [-1] * len(graph)
    if util_color(graph, max_colors, colored_vertices, 0):
        return colored_vertices
    return []
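
# A minimal usage sketch (added for illustration): 3-color a small graph given
# as a symmetric adjacency matrix. Because colors are tried in ascending order,
# the backtracking is deterministic and yields [0, 1, 0, 1, 2] here.
if __name__ == "__main__":
    graph = [
        [0, 1, 0, 0, 1],
        [1, 0, 1, 0, 1],
        [0, 1, 0, 1, 0],
        [0, 0, 1, 0, 1],
        [1, 1, 0, 1, 0],
    ]
    print(color(graph, 3))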
from argparse import ArgumentParser
from ..pipelines import Pipeline, PipelineDataFormat, get_supported_tasks, pipeline
from ..utils import logging
from . import BaseTransformersCLICommand
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
def try_infer_format_from_ext(path: str):
    if not path:
        return "pipe"

    for ext in PipelineDataFormat.SUPPORTED_FORMATS:
        if path.endswith(ext):
            return ext

    raise Exception(
        f"Unable to determine file format from file extension {path}. "
        f"Please provide the format through --format {PipelineDataFormat.SUPPORTED_FORMATS}"
    )


def run_command_factory(args):
    nlp = pipeline(
        task=args.task,
        model=args.model if args.model else None,
        config=args.config,
        tokenizer=args.tokenizer,
        device=args.device,
    )
    format = try_infer_format_from_ext(args.input) if args.format == "infer" else args.format
    reader = PipelineDataFormat.from_str(
        format=format,
        output_path=args.output,
        input_path=args.input,
        column=args.column if args.column else nlp.default_input_names,
        overwrite=args.overwrite,
    )
    return RunCommand(nlp, reader)


class RunCommand(BaseTransformersCLICommand):
    def __init__(self, nlp: Pipeline, reader: PipelineDataFormat):
        self._nlp = nlp
        self._reader = reader

    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        run_parser = parser.add_parser("run", help="Run a pipeline through the CLI")
        run_parser.add_argument("--task", choices=get_supported_tasks(), help="Task to run")
        run_parser.add_argument("--input", type=str, help="Path to the file to use for inference")
        run_parser.add_argument("--output", type=str, help="Path to the file that will be used post to write results.")
        run_parser.add_argument("--model", type=str, help="Name or path to the model to instantiate.")
        run_parser.add_argument("--config", type=str, help="Name or path to the model's config to instantiate.")
        run_parser.add_argument(
            "--tokenizer", type=str, help="Name of the tokenizer to use. (default: same as the model name)"
        )
        run_parser.add_argument(
            "--column",
            type=str,
            help="Name of the column to use as input. (For multi columns input as QA use column1,columns2)",
        )
        run_parser.add_argument(
            "--format",
            type=str,
            default="infer",
            choices=PipelineDataFormat.SUPPORTED_FORMATS,
            help="Input format to read from",
        )
        run_parser.add_argument(
            "--device",
            type=int,
            default=-1,
            help="Indicate the device to run onto, -1 indicates CPU, >= 0 indicates GPU (default: -1)",
        )
        run_parser.add_argument("--overwrite", action="store_true", help="Allow overwriting the output file.")
        run_parser.set_defaults(func=run_command_factory)

    def run(self):
        nlp, outputs = self._nlp, []
        for entry in self._reader:
            output = nlp(**entry) if self._reader.is_multi_columns else nlp(entry)
            if isinstance(output, dict):
                outputs.append(output)
            else:
                outputs += output

        # Saving data
        if self._nlp.binary_output:
            binary_path = self._reader.save_binary(outputs)
            logger.warning(f"Current pipeline requires output to be in binary format, saving at {binary_path}")
        else:
            self._reader.save(outputs)
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DetrConfig, DetrForObjectDetection, DetrForSegmentation, DetrImageProcessor, ResNetConfig
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_detr_config(model_name):
    # initialize config based on the ResNet backbone
    if "resnet-50" in model_name:
        backbone_config = ResNetConfig.from_pretrained("microsoft/resnet-50")
    elif "resnet-101" in model_name:
        backbone_config = ResNetConfig.from_pretrained("microsoft/resnet-101")
    else:
        raise ValueError("Model name should include either resnet50 or resnet101")

    config = DetrConfig(use_timm_backbone=False, backbone_config=backbone_config)

    # set label attributes
    is_panoptic = "panoptic" in model_name
    if is_panoptic:
        config.num_labels = 250
    else:
        config.num_labels = 91

    repo_id = "huggingface/label-files"
    filename = "coco-detection-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    return config, is_panoptic
def create_rename_keys(config):
    # here we list all keys to be renamed (original name on the left, our name on the right)
    rename_keys = []
# stem
# fmt: off
rename_keys.append(('''backbone.0.body.conv1.weight''', '''backbone.conv_encoder.model.embedder.embedder.convolution.weight''') )
rename_keys.append(('''backbone.0.body.bn1.weight''', '''backbone.conv_encoder.model.embedder.embedder.normalization.weight''') )
rename_keys.append(('''backbone.0.body.bn1.bias''', '''backbone.conv_encoder.model.embedder.embedder.normalization.bias''') )
rename_keys.append(('''backbone.0.body.bn1.running_mean''', '''backbone.conv_encoder.model.embedder.embedder.normalization.running_mean''') )
rename_keys.append(('''backbone.0.body.bn1.running_var''', '''backbone.conv_encoder.model.embedder.embedder.normalization.running_var''') )
# stages
for stage_idx in range(len(config.backbone_config.depths ) ):
for layer_idx in range(config.backbone_config.depths[stage_idx] ):
# shortcut
if layer_idx == 0:
rename_keys.append(
(
F'backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.0.weight',
F'backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.convolution.weight',
) )
rename_keys.append(
(
F'backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.weight',
F'backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.weight',
) )
rename_keys.append(
(
F'backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.bias',
F'backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.bias',
) )
rename_keys.append(
(
F'backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.running_mean',
F'backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.running_mean',
) )
rename_keys.append(
(
F'backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.running_var',
F'backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.running_var',
) )
# 3 convs
for i in range(3 ):
rename_keys.append(
(
F'backbone.0.body.layer{stage_idx + 1}.{layer_idx}.conv{i+1}.weight',
F'backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.convolution.weight',
) )
rename_keys.append(
(
F'backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.weight',
F'backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.weight',
) )
rename_keys.append(
(
F'backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.bias',
F'backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.bias',
) )
rename_keys.append(
(
F'backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.running_mean',
F'backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.running_mean',
) )
rename_keys.append(
(
F'backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.running_var',
F'backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.running_var',
) )
# fmt: on
for i in range(config.encoder_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(
F'transformer.encoder.layers.{i}.self_attn.out_proj.weight',
F'encoder.layers.{i}.self_attn.out_proj.weight',
) )
rename_keys.append(
(F'transformer.encoder.layers.{i}.self_attn.out_proj.bias', F'encoder.layers.{i}.self_attn.out_proj.bias') )
rename_keys.append((F'transformer.encoder.layers.{i}.linear1.weight', F'encoder.layers.{i}.fc1.weight') )
rename_keys.append((F'transformer.encoder.layers.{i}.linear1.bias', F'encoder.layers.{i}.fc1.bias') )
rename_keys.append((F'transformer.encoder.layers.{i}.linear2.weight', F'encoder.layers.{i}.fc2.weight') )
rename_keys.append((F'transformer.encoder.layers.{i}.linear2.bias', F'encoder.layers.{i}.fc2.bias') )
rename_keys.append(
(F'transformer.encoder.layers.{i}.norm1.weight', F'encoder.layers.{i}.self_attn_layer_norm.weight') )
rename_keys.append(
(F'transformer.encoder.layers.{i}.norm1.bias', F'encoder.layers.{i}.self_attn_layer_norm.bias') )
rename_keys.append(
(F'transformer.encoder.layers.{i}.norm2.weight', F'encoder.layers.{i}.final_layer_norm.weight') )
rename_keys.append((F'transformer.encoder.layers.{i}.norm2.bias', F'encoder.layers.{i}.final_layer_norm.bias') )
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(
F'transformer.decoder.layers.{i}.self_attn.out_proj.weight',
F'decoder.layers.{i}.self_attn.out_proj.weight',
) )
rename_keys.append(
(F'transformer.decoder.layers.{i}.self_attn.out_proj.bias', F'decoder.layers.{i}.self_attn.out_proj.bias') )
rename_keys.append(
(
F'transformer.decoder.layers.{i}.multihead_attn.out_proj.weight',
F'decoder.layers.{i}.encoder_attn.out_proj.weight',
) )
rename_keys.append(
(
F'transformer.decoder.layers.{i}.multihead_attn.out_proj.bias',
F'decoder.layers.{i}.encoder_attn.out_proj.bias',
) )
rename_keys.append((F'transformer.decoder.layers.{i}.linear1.weight', F'decoder.layers.{i}.fc1.weight') )
rename_keys.append((F'transformer.decoder.layers.{i}.linear1.bias', F'decoder.layers.{i}.fc1.bias') )
rename_keys.append((F'transformer.decoder.layers.{i}.linear2.weight', F'decoder.layers.{i}.fc2.weight') )
rename_keys.append((F'transformer.decoder.layers.{i}.linear2.bias', F'decoder.layers.{i}.fc2.bias') )
rename_keys.append(
(F'transformer.decoder.layers.{i}.norm1.weight', F'decoder.layers.{i}.self_attn_layer_norm.weight') )
rename_keys.append(
(F'transformer.decoder.layers.{i}.norm1.bias', F'decoder.layers.{i}.self_attn_layer_norm.bias') )
rename_keys.append(
(F'transformer.decoder.layers.{i}.norm2.weight', F'decoder.layers.{i}.encoder_attn_layer_norm.weight') )
rename_keys.append(
(F'transformer.decoder.layers.{i}.norm2.bias', F'decoder.layers.{i}.encoder_attn_layer_norm.bias') )
rename_keys.append(
(F'transformer.decoder.layers.{i}.norm3.weight', F'decoder.layers.{i}.final_layer_norm.weight') )
rename_keys.append((F'transformer.decoder.layers.{i}.norm3.bias', F'decoder.layers.{i}.final_layer_norm.bias') )
# convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
rename_keys.extend(
[
('''input_proj.weight''', '''input_projection.weight'''),
('''input_proj.bias''', '''input_projection.bias'''),
('''query_embed.weight''', '''query_position_embeddings.weight'''),
('''transformer.decoder.norm.weight''', '''decoder.layernorm.weight'''),
('''transformer.decoder.norm.bias''', '''decoder.layernorm.bias'''),
('''class_embed.weight''', '''class_labels_classifier.weight'''),
('''class_embed.bias''', '''class_labels_classifier.bias'''),
('''bbox_embed.layers.0.weight''', '''bbox_predictor.layers.0.weight'''),
('''bbox_embed.layers.0.bias''', '''bbox_predictor.layers.0.bias'''),
('''bbox_embed.layers.1.weight''', '''bbox_predictor.layers.1.weight'''),
('''bbox_embed.layers.1.bias''', '''bbox_predictor.layers.1.bias'''),
('''bbox_embed.layers.2.weight''', '''bbox_predictor.layers.2.weight'''),
('''bbox_embed.layers.2.bias''', '''bbox_predictor.layers.2.bias'''),
] )
return rename_keys
def rename_key(state_dict, old, new):
    val = state_dict.pop(old)
    state_dict[new] = val
def read_in_q_k_v(state_dict, is_panoptic=False):
    prefix = ""
    if is_panoptic:
        prefix = "detr."

    # first: transformer encoder
    for i in range(6):
        # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
    # next: transformer decoder (which is a bit more complex because it also includes cross-attention)
    for i in range(6):
        # read in weights + bias of input projection layer of self-attention
        in_proj_weight = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"decoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"decoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"decoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"decoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"decoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"decoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
        # read in weights + bias of input projection layer of cross-attention
        in_proj_weight_cross_attn = state_dict.pop(
            f"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight"
        )
        in_proj_bias_cross_attn = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias")
        # next, add query, keys and values (in that order) of cross-attention to the state dict
        state_dict[f"decoder.layers.{i}.encoder_attn.q_proj.weight"] = in_proj_weight_cross_attn[:256, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.q_proj.bias"] = in_proj_bias_cross_attn[:256]
        state_dict[f"decoder.layers.{i}.encoder_attn.k_proj.weight"] = in_proj_weight_cross_attn[256:512, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.k_proj.bias"] = in_proj_bias_cross_attn[256:512]
        state_dict[f"decoder.layers.{i}.encoder_attn.v_proj.weight"] = in_proj_weight_cross_attn[-256:, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.v_proj.bias"] = in_proj_bias_cross_attn[-256:]
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_detr_checkpoint(model_name, pytorch_dump_folder_path=None, push_to_hub=False):
    config, is_panoptic = get_detr_config(model_name)

    # load original model from torch hub
    model_name_to_original_name = {
        "detr-resnet-50": "detr_resnet50",
        "detr-resnet-101": "detr_resnet101",
    }
    logger.info(f"Converting model {model_name}...")
    detr = torch.hub.load("facebookresearch/detr", model_name_to_original_name[model_name], pretrained=True).eval()
    state_dict = detr.state_dict()
    # rename keys
    for src, dest in create_rename_keys(config):
        if is_panoptic:
            src = "detr." + src
        rename_key(state_dict, src, dest)
    # query, key and value matrices need special treatment
    read_in_q_k_v(state_dict, is_panoptic=is_panoptic)
    # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
    prefix = "detr.model." if is_panoptic else "model."
    for key in state_dict.copy().keys():
        if is_panoptic:
            if (
                key.startswith("detr")
                and not key.startswith("class_labels_classifier")
                and not key.startswith("bbox_predictor")
            ):
                val = state_dict.pop(key)
                state_dict["detr.model" + key[4:]] = val
            elif "class_labels_classifier" in key or "bbox_predictor" in key:
                val = state_dict.pop(key)
                state_dict["detr." + key] = val
            elif key.startswith("bbox_attention") or key.startswith("mask_head"):
                continue
            else:
                val = state_dict.pop(key)
                state_dict[prefix + key] = val
        else:
            if not key.startswith("class_labels_classifier") and not key.startswith("bbox_predictor"):
                val = state_dict.pop(key)
                state_dict[prefix + key] = val

    # finally, create HuggingFace model and load state dict
    model = DetrForSegmentation(config) if is_panoptic else DetrForObjectDetection(config)
    model.load_state_dict(state_dict)
    model.eval()

    # verify our conversion on an image
    format = "coco_panoptic" if is_panoptic else "coco_detection"
    processor = DetrImageProcessor(format=format)

    encoding = processor(images=prepare_img(), return_tensors="pt")
    pixel_values = encoding["pixel_values"]

    original_outputs = detr(pixel_values)
    outputs = model(pixel_values)

    assert torch.allclose(outputs.logits, original_outputs["pred_logits"], atol=1e-3)
    assert torch.allclose(outputs.pred_boxes, original_outputs["pred_boxes"], atol=1e-3)
    if is_panoptic:
        assert torch.allclose(outputs.pred_masks, original_outputs["pred_masks"], atol=1e-4)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        # Save model and image processor
        logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}...")
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        # Upload model and image processor to the hub
        logger.info("Uploading PyTorch model and image processor to the hub...")
        model.push_to_hub(f"nielsr/{model_name}")
        processor.push_to_hub(f"nielsr/{model_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"--model_name",
default="detr-resnet-50",
type=str,
choices=["detr-resnet-50", "detr-resnet-101"],
help="Name of the DETR model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
)
parser.add_argument("--push_to_hub", action="store_true", help="Whether to push the model to the hub or not.")
    args = parser.parse_args()
convert_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
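    # Example invocation (added for illustration; assumes the script is saved
    # locally, e.g. as convert_detr_to_pytorch.py, and torch.hub can reach
    # facebookresearch/detr):
    #   python convert_detr_to_pytorch.py --model_name detr-resnet-50 \
    #       --pytorch_dump_folder_path ./detr-resnet-50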
"""simple docstring"""
def is_pentagonal(n: int) -> bool:
    """
    >>> is_pentagonal(330)
    True
    >>> is_pentagonal(6)
    False
    """
    root = (1 + 24 * n) ** 0.5
    return ((1 + root) / 6) % 1 == 0


def solution(limit: int = 5000) -> int:
    pentagonal_nums = [(i * (3 * i - 1)) // 2 for i in range(1, limit)]
    for i, pentagonal_i in enumerate(pentagonal_nums):
        for j in range(i, len(pentagonal_nums)):
            pentagonal_j = pentagonal_nums[j]
            a = pentagonal_i + pentagonal_j
            b = pentagonal_j - pentagonal_i
            if is_pentagonal(a) and is_pentagonal(b):
                return b
    return -1
if __name__ == "__main__":
print(f'''{solution() = }''')
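    # Added sanity check: P(n) = n(3n - 1) / 2, and x is pentagonal iff
    # (1 + sqrt(1 + 24x)) / 6 is a positive integer, which is what is_pentagonal tests.
    assert all(is_pentagonal((k * (3 * k - 1)) // 2) for k in range(1, 100))
    assert not is_pentagonal(6)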
"""simple docstring"""
from timeit import timeit
def get_set_bits_count_using_brian_kernighans_algorithm(number: int) -> int:
    """
    Count the number of set bits in an integer.
    >>> get_set_bits_count_using_brian_kernighans_algorithm(25)
    3
    >>> get_set_bits_count_using_brian_kernighans_algorithm(0)
    0
    """
    if number < 0:
        raise ValueError("the value of input must not be negative")
    result = 0
    while number:
        number &= number - 1
        result += 1
    return result


def get_set_bits_count_using_modulo_operator(number: int) -> int:
    """
    Count the number of set bits in an integer.
    >>> get_set_bits_count_using_modulo_operator(25)
    3
    >>> get_set_bits_count_using_modulo_operator(0)
    0
    """
    if number < 0:
        raise ValueError("the value of input must not be negative")
    result = 0
    while number:
        if number % 2 == 1:
            result += 1
        number >>= 1
    return result


def benchmark() -> None:
    def do_benchmark(number: int) -> None:
        setup = "import __main__ as z"
        print(f"Benchmark when {number = }:")
        print(f"{get_set_bits_count_using_modulo_operator(number) = }")
        timing = timeit("z.get_set_bits_count_using_modulo_operator(25)", setup=setup)
        print(f"timeit() runs in {timing} seconds")
        print(f"{get_set_bits_count_using_brian_kernighans_algorithm(number) = }")
        timing = timeit(
            "z.get_set_bits_count_using_brian_kernighans_algorithm(25)",
            setup=setup,
        )
        print(f"timeit() runs in {timing} seconds")

    for number in (25, 37, 58, 0):
        do_benchmark(number)
        print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
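    # Added illustration: n & (n - 1) clears the lowest set bit, so Brian
    # Kernighan's loop runs once per set bit:
    # 0b10110 (22) -> 0b10100 -> 0b10000 -> 0b00000, i.e. three iterations.
    assert get_set_bits_count_using_brian_kernighans_algorithm(0b10110) == 3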
import re
from filelock import FileLock
try:
    import nltk

    NLTK_AVAILABLE = True
except (ImportError, ModuleNotFoundError):
    NLTK_AVAILABLE = False

if NLTK_AVAILABLE:
    with FileLock(".lock") as lock:
        nltk.download("punkt", quiet=True)


def add_newline_to_end_of_each_sentence(x: str) -> str:
    """Split text into sentences, one per line (used to compute rougeLsum)."""
    x = re.sub("<n>", "", x)  # remove pegasus newline char
    assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
    return "\n".join(nltk.sent_tokenize(x))
"""simple docstring"""
import argparse
import importlib
from pathlib import Path
# Test all the extensions added in the setup
FILES_TO_FIND = [
'''kernels/rwkv/wkv_cuda.cu''',
'''kernels/rwkv/wkv_op.cpp''',
'''kernels/deformable_detr/ms_deform_attn.h''',
'''kernels/deformable_detr/cuda/ms_deform_im2col_cuda.cuh''',
'''models/graphormer/algos_graphormer.pyx''',
]
def test_custom_files_are_present(transformers_path):
    # Test all the extensions added in the setup
    for file in FILES_TO_FIND:
        if not (transformers_path / file).exists():
            return False
    return True
if __name__ == "__main__":
SCREAMING_SNAKE_CASE_ = argparse.ArgumentParser()
parser.add_argument('''--check_lib''', action='''store_true''', help='''Whether to check the build or the actual package.''')
SCREAMING_SNAKE_CASE_ = parser.parse_args()
if args.check_lib:
SCREAMING_SNAKE_CASE_ = importlib.import_module('''transformers''')
SCREAMING_SNAKE_CASE_ = Path(transformers_module.__file__).parent
else:
SCREAMING_SNAKE_CASE_ = Path.cwd() / '''build/lib/transformers'''
if not test_custom_files_are_present(transformers_path):
raise ValueError('''The built release does not contain the custom files. Fix this before going further!''')
'''simple docstring'''
class RadixNode:
    def __init__(self, prefix: str = "", is_leaf: bool = False) -> None:
        # Mapping from the first character of an outgoing edge to the child node
        self.nodes: dict[str, RadixNode] = {}

        # A node will be a leaf if the tree contains its word
        self.is_leaf = is_leaf
        self.prefix = prefix

    def match(self, word: str) -> tuple[str, str, str]:
        """Compute the common substring of the node prefix and a word.

        Returns (common substring, remaining node prefix, remaining word).
        """
        x = 0
        for q, w in zip(self.prefix, word):
            if q != w:
                break
            x += 1
        return self.prefix[:x], self.prefix[x:], word[x:]

    def insert_many(self, words: list[str]) -> None:
        for word in words:
            self.insert(word)

    def insert(self, word: str) -> None:
        # Case 1: If the word is the prefix of the node
        # Solution: we set the current node as leaf
        if self.prefix == word:
            self.is_leaf = True

        # Case 2: The node has no edges that have a prefix to the word
        # Solution: We create an edge from the current node to a new one
        # containing the word
        elif word[0] not in self.nodes:
            self.nodes[word[0]] = RadixNode(prefix=word, is_leaf=True)

        else:
            incoming_node = self.nodes[word[0]]
            matching_string, remaining_prefix, remaining_word = incoming_node.match(word)

            # Case 3: The node prefix is equal to the matching
            # Solution: We insert remaining word on the next node
            if remaining_prefix == "":
                self.nodes[matching_string[0]].insert(remaining_word)

            # Case 4: The word is greater equal to the matching
            # Solution: Create a node in between both nodes, change
            # prefixes and add the new node for the remaining word
            else:
                incoming_node.prefix = remaining_prefix

                aux_node = self.nodes[matching_string[0]]
                self.nodes[matching_string[0]] = RadixNode(matching_string, False)
                self.nodes[matching_string[0]].nodes[remaining_prefix[0]] = aux_node

                if remaining_word == "":
                    self.nodes[matching_string[0]].is_leaf = True
                else:
                    self.nodes[matching_string[0]].insert(remaining_word)

    def find(self, word: str) -> bool:
        incoming_node = self.nodes.get(word[0], None)
        if not incoming_node:
            return False
        else:
            matching_string, remaining_prefix, remaining_word = incoming_node.match(word)
            # If there is remaining prefix, the word can't be on the tree
            if remaining_prefix != "":
                return False
            # This applies when the word and the prefix are equal
            elif remaining_word == "":
                return incoming_node.is_leaf
            # We have word remaining so we check the next node
            else:
                return incoming_node.find(remaining_word)

    def delete(self, word: str) -> bool:
        incoming_node = self.nodes.get(word[0], None)
        if not incoming_node:
            return False
        else:
            matching_string, remaining_prefix, remaining_word = incoming_node.match(word)
            # If there is remaining prefix, the word can't be on the tree
            if remaining_prefix != "":
                return False
            # We have word remaining so we check the next node
            elif remaining_word != "":
                return incoming_node.delete(remaining_word)
            else:
                # If it is not a leaf, we don't have to delete
                if not incoming_node.is_leaf:
                    return False
                else:
                    # We delete the nodes if no edges go from it
                    if len(incoming_node.nodes) == 0:
                        del self.nodes[word[0]]
                        # We merge the current node with its only child
                        if len(self.nodes) == 1 and not self.is_leaf:
                            merging_node = list(self.nodes.values())[0]
                            self.is_leaf = merging_node.is_leaf
                            self.prefix += merging_node.prefix
                            self.nodes = merging_node.nodes
                    # If there is more than 1 edge, we just mark it as non-leaf
                    elif len(incoming_node.nodes) > 1:
                        incoming_node.is_leaf = False
                    # If there is 1 edge, we merge it with its child
                    else:
                        merging_node = list(incoming_node.nodes.values())[0]
                        incoming_node.is_leaf = merging_node.is_leaf
                        incoming_node.prefix += merging_node.prefix
                        incoming_node.nodes = merging_node.nodes

                return True

    def print_tree(self, height: int = 0) -> None:
        if self.prefix != "":
            print("-" * height, self.prefix, "  (leaf)" if self.is_leaf else "")

        for value in self.nodes.values():
            value.print_tree(height + 1)


def test_trie() -> bool:
    words = "banana bananas bandana band apple all beast".split()
    root = RadixNode()
    root.insert_many(words)

    assert all(root.find(word) for word in words)
    assert not root.find("bandanas")
    assert not root.find("apps")
    root.delete("all")
    assert not root.find("all")
    root.delete("banana")
    assert not root.find("banana")
    assert root.find("bananas")

    return True


def pytests() -> None:
    assert test_trie()


def main() -> None:
    root = RadixNode()
    words = "banana bananas bandanas bandana band apple all beast".split()
    root.insert_many(words)

    print("Words:", words)
    print("Tree:")
    root.print_tree()
if __name__ == "__main__":
main()
'''simple docstring'''
import numpy as np
def runge_kutta(f, y0, x0, h, x_end):
    """Numerically solve y' = f(x, y) on [x0, x_end] with step size h and
    initial value y(x0) = y0, using the classical fourth-order Runge-Kutta method."""
    n = int(np.ceil((x_end - x0) / h))
    y = np.zeros((n + 1,))
    y[0] = y0
    x = x0

    for k in range(n):
        k1 = f(x, y[k])
        k2 = f(x + 0.5 * h, y[k] + 0.5 * h * k1)
        k3 = f(x + 0.5 * h, y[k] + 0.5 * h * k2)
        k4 = f(x + h, y[k] + h * k3)
        y[k + 1] = y[k] + (1 / 6) * h * (k1 + 2 * k2 + 2 * k3 + k4)
        x += h

    return y
if __name__ == "__main__":
import doctest
doctest.testmod()
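    # Added example: integrate y' = y from x = 0 to 1 with h = 0.01 and y(0) = 1;
    # the final value should be close to e ≈ 2.71828.
    ys = runge_kutta(lambda x, y: y, y0=1.0, x0=0.0, h=0.01, x_end=1.0)
    print(ys[-1])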
'''simple docstring'''
import argparse
from typing import List
import evaluate
import numpy as np
import torch
from datasets import DatasetDict, load_dataset
# New Code #
# We'll be using StratifiedKFold for this example
from sklearn.model_selection import StratifiedKFold
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to perform Cross Validation,
# and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32


def get_fold_dataloaders(
    accelerator: Accelerator, dataset: DatasetDict, train_idxs: List[int], valid_idxs: List[int], batch_size: int = 16
):
    """Creates a set of `DataLoader`s for the `glue` dataset from the given fold indices."""
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = DatasetDict(
        {
            "train": dataset["train"].select(train_idxs),
            "validation": dataset["train"].select(valid_idxs),
            "test": dataset["validation"],
        }
    )

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=batch_size
    )
    test_dataloader = DataLoader(
        tokenized_datasets["test"], shuffle=False, collate_fn=collate_fn, batch_size=batch_size
    )

    return train_dataloader, eval_dataloader, test_dataloader


def training_function(config, args):
    # New Code #
    test_predictions = []
    # Download the dataset
    datasets = load_dataset("glue", "mrpc")
    # Create our splits
    kfold = StratifiedKFold(n_splits=int(args.num_folds))
    # Initialize accelerator
    accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    metric = evaluate.load("glue", "mrpc")

    # If the batch size is too big we use gradient accumulation
    gradient_accumulation_steps = 1
    if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
        gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
        batch_size = MAX_GPU_BATCH_SIZE

    set_seed(seed)

    # New Code #
    # Create our folds:
    folds = kfold.split(np.zeros(datasets["train"].num_rows), datasets["train"]["label"])
    test_references = []
    # Iterate over them
    for i, (train_idxs, valid_idxs) in enumerate(folds):
        train_dataloader, eval_dataloader, test_dataloader = get_fold_dataloaders(
            accelerator,
            datasets,
            train_idxs,
            valid_idxs,
        )
        # Instantiate the model (we build the model here so that the seed also control new weights initialization)
        model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

        # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
        # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
        # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
        model = model.to(accelerator.device)

        # Instantiate optimizer
        optimizer = AdamW(params=model.parameters(), lr=lr)

        # Instantiate scheduler
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer,
            num_warmup_steps=100,
            num_training_steps=(len(train_dataloader) * num_epochs) // gradient_accumulation_steps,
        )

        # Prepare everything
        # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
        # prepare method.
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
            model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
        )

        # Now we train the model
        for epoch in range(num_epochs):
            model.train()
            for step, batch in enumerate(train_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                outputs = model(**batch)
                loss = outputs.loss
                loss = loss / gradient_accumulation_steps
                accelerator.backward(loss)
                if step % gradient_accumulation_steps == 0:
                    optimizer.step()
                    lr_scheduler.step()
                    optimizer.zero_grad()

            model.eval()
            for step, batch in enumerate(eval_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                with torch.no_grad():
                    outputs = model(**batch)
                predictions = outputs.logits.argmax(dim=-1)
                predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
                metric.add_batch(
                    predictions=predictions,
                    references=references,
                )

            eval_metric = metric.compute()
            # Use accelerator.print to print only on the main process.
            accelerator.print(f"epoch {epoch}:", eval_metric)

        # New Code #
        # We also run predictions on the test set at the very end
        fold_predictions = []
        for step, batch in enumerate(test_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits
            predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
            fold_predictions.append(predictions.cpu())
            if i == 0:
                # We need all of the test predictions
                test_references.append(references.cpu())

        # Use accelerator.print to print only on the main process.
        test_predictions.append(torch.cat(fold_predictions, dim=0))
        # We now need to release all our memory and get rid of the current model, optimizer, etc
        accelerator.free_memory()

    # New Code #
    # Finally we check the accuracy of our folded results:
    test_references = torch.cat(test_references, dim=0)
    preds = torch.stack(test_predictions, dim=0).sum(dim=0).div(int(args.num_folds)).argmax(dim=-1)
    test_metric = metric.compute(predictions=preds, references=test_references)
    accelerator.print("Average test metrics from all folds:", test_metric)


def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    # New Code #
    parser.add_argument("--num_folds", type=int, default=3, help="The number of splits to perform across the dataset")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)


if __name__ == "__main__":
    main()
'''simple docstring'''
import torch
from diffusers import DDPMParallelScheduler
from .test_schedulers import SchedulerCommonTest
class DDPMParallelSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DDPMParallelScheduler,)

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "variance_type": "fixed_small",
            "clip_sample": True,
        }

        config.update(**kwargs)
        return config
def __lowerCAmelCase ( self : Dict ) -> Any:
"""simple docstring"""
for timesteps in [1, 5, 1_00, 10_00]:
self.check_over_configs(num_train_timesteps=lowerCAmelCase_ )
def __lowerCAmelCase ( self : Optional[Any] ) -> List[Any]:
"""simple docstring"""
for beta_start, beta_end in zip([0.0_0_0_1, 0.0_0_1, 0.0_1, 0.1] , [0.0_0_2, 0.0_2, 0.2, 2] ):
self.check_over_configs(beta_start=lowerCAmelCase_ , beta_end=lowerCAmelCase_ )
def __lowerCAmelCase ( self : List[str] ) -> List[Any]:
"""simple docstring"""
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=lowerCAmelCase_ )
def __lowerCAmelCase ( self : int ) -> Optional[Any]:
"""simple docstring"""
for variance in ["fixed_small", "fixed_large", "other"]:
self.check_over_configs(variance_type=lowerCAmelCase_ )
def __lowerCAmelCase ( self : Any ) -> List[Any]:
"""simple docstring"""
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=lowerCAmelCase_ )
def __lowerCAmelCase ( self : Optional[Any] ) -> Optional[Any]:
"""simple docstring"""
self.check_over_configs(thresholding=lowerCAmelCase_ )
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(
thresholding=lowerCAmelCase_ , prediction_type=lowerCAmelCase_ , sample_max_value=lowerCAmelCase_ , )
def __lowerCAmelCase ( self : Tuple ) -> str:
"""simple docstring"""
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(prediction_type=lowerCAmelCase_ )
def __lowerCAmelCase ( self : str ) -> List[str]:
"""simple docstring"""
for t in [0, 5_00, 9_99]:
self.check_over_forward(time_step=lowerCAmelCase_ )
def __lowerCAmelCase ( self : str ) -> Optional[int]:
"""simple docstring"""
_a = self.scheduler_classes[0]
_a = self.get_scheduler_config()
_a = scheduler_class(**lowerCAmelCase_ )
assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 0.0 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(4_87 ) - 0.0_0_9_7_9 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(9_99 ) - 0.0_2 ) ) < 1e-5
def __lowerCAmelCase ( self : Dict ) -> str:
"""simple docstring"""
_a = self.scheduler_classes[0]
_a = self.get_scheduler_config()
_a = scheduler_class(**lowerCAmelCase_ )
_a = len(lowerCAmelCase_ )
_a = self.dummy_model()
_a = self.dummy_sample_deter
_a = self.dummy_sample_deter + 0.1
_a = self.dummy_sample_deter - 0.1
_a = samplea.shape[0]
_a = torch.stack([samplea, samplea, samplea] , dim=0 )
_a = torch.arange(lowerCAmelCase_ )[0:3, None].repeat(1 , lowerCAmelCase_ )
_a = model(samples.flatten(0 , 1 ) , timesteps.flatten(0 , 1 ) )
_a = scheduler.batch_step_no_noise(lowerCAmelCase_ , timesteps.flatten(0 , 1 ) , samples.flatten(0 , 1 ) )
_a = torch.sum(torch.abs(lowerCAmelCase_ ) )
_a = torch.mean(torch.abs(lowerCAmelCase_ ) )
assert abs(result_sum.item() - 1_1_5_3.1_8_3_3 ) < 1e-2
assert abs(result_mean.item() - 0.5_0_0_5 ) < 1e-3
def __lowerCAmelCase ( self : Optional[int] ) -> Dict:
"""simple docstring"""
_a = self.scheduler_classes[0]
_a = self.get_scheduler_config()
_a = scheduler_class(**lowerCAmelCase_ )
_a = len(lowerCAmelCase_ )
_a = self.dummy_model()
_a = self.dummy_sample_deter
_a = torch.manual_seed(0 )
for t in reversed(range(lowerCAmelCase_ ) ):
# 1. predict noise residual
_a = model(lowerCAmelCase_ , lowerCAmelCase_ )
# 2. predict previous mean of sample x_t-1
_a = scheduler.step(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , generator=lowerCAmelCase_ ).prev_sample
_a = pred_prev_sample
_a = torch.sum(torch.abs(lowerCAmelCase_ ) )
_a = torch.mean(torch.abs(lowerCAmelCase_ ) )
assert abs(result_sum.item() - 2_5_8.9_6_0_6 ) < 1e-2
assert abs(result_mean.item() - 0.3_3_7_2 ) < 1e-3
def __lowerCAmelCase ( self : Optional[Any] ) -> List[Any]:
"""simple docstring"""
_a = self.scheduler_classes[0]
_a = self.get_scheduler_config(prediction_type='''v_prediction''' )
_a = scheduler_class(**lowerCAmelCase_ )
_a = len(lowerCAmelCase_ )
_a = self.dummy_model()
_a = self.dummy_sample_deter
_a = torch.manual_seed(0 )
for t in reversed(range(lowerCAmelCase_ ) ):
# 1. predict noise residual
_a = model(lowerCAmelCase_ , lowerCAmelCase_ )
# 2. predict previous mean of sample x_t-1
_a = scheduler.step(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , generator=lowerCAmelCase_ ).prev_sample
_a = pred_prev_sample
_a = torch.sum(torch.abs(lowerCAmelCase_ ) )
_a = torch.mean(torch.abs(lowerCAmelCase_ ) )
assert abs(result_sum.item() - 2_0_2.0_2_9_6 ) < 1e-2
assert abs(result_mean.item() - 0.2_6_3_1 ) < 1e-3
def __lowerCAmelCase ( self : int ) -> Dict:
"""simple docstring"""
_a = self.scheduler_classes[0]
_a = self.get_scheduler_config()
_a = scheduler_class(**lowerCAmelCase_ )
_a = [1_00, 87, 50, 1, 0]
scheduler.set_timesteps(timesteps=lowerCAmelCase_ )
_a = scheduler.timesteps
for i, timestep in enumerate(lowerCAmelCase_ ):
if i == len(lowerCAmelCase_ ) - 1:
_a = -1
else:
_a = timesteps[i + 1]
_a = scheduler.previous_timestep(lowerCAmelCase_ )
_a = prev_t.item()
self.assertEqual(lowerCAmelCase_ , lowerCAmelCase_ )
def __lowerCAmelCase ( self : Dict ) -> List[Any]:
"""simple docstring"""
_a = self.scheduler_classes[0]
_a = self.get_scheduler_config()
_a = scheduler_class(**lowerCAmelCase_ )
_a = [1_00, 87, 50, 51, 0]
with self.assertRaises(lowerCAmelCase_ , msg='''`custom_timesteps` must be in descending order.''' ):
scheduler.set_timesteps(timesteps=lowerCAmelCase_ )
def __lowerCAmelCase ( self : List[Any] ) -> Optional[Any]:
"""simple docstring"""
_a = self.scheduler_classes[0]
_a = self.get_scheduler_config()
_a = scheduler_class(**lowerCAmelCase_ )
_a = [1_00, 87, 50, 1, 0]
_a = len(lowerCAmelCase_ )
with self.assertRaises(lowerCAmelCase_ , msg='''Can only pass one of `num_inference_steps` or `custom_timesteps`.''' ):
scheduler.set_timesteps(num_inference_steps=lowerCAmelCase_ , timesteps=lowerCAmelCase_ )
def __lowerCAmelCase ( self : Dict ) -> Any:
"""simple docstring"""
_a = self.scheduler_classes[0]
_a = self.get_scheduler_config()
_a = scheduler_class(**lowerCAmelCase_ )
_a = [scheduler.config.num_train_timesteps]
with self.assertRaises(
lowerCAmelCase_ , msg='''`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}}''' , ):
scheduler.set_timesteps(timesteps=lowerCAmelCase_ )
import argparse
import os
import torch
from transformers import FlavaImageCodebook, FlavaImageCodebookConfig
def rreplace(s, old, new, occurrence):
    # replace the last `occurrence` occurrences of `old` in `s` with `new`
    li = s.rsplit(old, occurrence)
    return new.join(li)


def count_parameters(state_dict):
    # encoder.embeddings are double copied in original FLAVA
    return sum(param.float().sum() if "encoder.embeddings" not in key else 0 for key, param in state_dict.items())


def upgrade_state_dict(state_dict):
    upgrade = {}

    group_keys = ["group_1", "group_2", "group_3", "group_4"]
    for key, value in state_dict.items():
        for group_key in group_keys:
            if group_key in key:
                key = key.replace(f"{group_key}.", f"{group_key}.group.")

        if "res_path" in key:
            key = key.replace("res_path.", "res_path.path.")

        if key.endswith(".w"):
            key = rreplace(key, ".w", ".weight", 1)
        if key.endswith(".b"):
            key = rreplace(key, ".b", ".bias", 1)

        upgrade[key] = value.float()

    return upgrade


@torch.no_grad()
def convert_dalle_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path=None, save_checkpoint=True):
    from dall_e import Encoder

    encoder = Encoder()
    if os.path.exists(checkpoint_path):
        ckpt = torch.load(checkpoint_path)
    else:
        ckpt = torch.hub.load_state_dict_from_url(checkpoint_path)

    if isinstance(ckpt, Encoder):
        ckpt = ckpt.state_dict()
    encoder.load_state_dict(ckpt)

    if config_path is not None:
        config = FlavaImageCodebookConfig.from_pretrained(config_path)
    else:
        config = FlavaImageCodebookConfig()

    hf_model = FlavaImageCodebook(config).eval()
    state_dict = encoder.state_dict()

    hf_state_dict = upgrade_state_dict(state_dict)
    hf_model.load_state_dict(hf_state_dict)
    hf_state_dict = hf_model.state_dict()
    hf_count = count_parameters(hf_state_dict)
    state_dict_count = count_parameters(state_dict)

    assert torch.allclose(hf_count, state_dict_count, atol=1e-3)

    if save_checkpoint:
        hf_model.save_pretrained(pytorch_dump_folder_path)
    else:
        return hf_state_dict
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to flava checkpoint")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    args = parser.parse_args()

    convert_dalle_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, List, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import TensorType, logging
if TYPE_CHECKING:
from ...onnx.config import PatchingSpec
from ...tokenization_utils_base import PreTrainedTokenizerBase
A_ = logging.get_logger(__name__)
A_ = {
"allenai/longformer-base-4096": "https://huggingface.co/allenai/longformer-base-4096/resolve/main/config.json",
"allenai/longformer-large-4096": "https://huggingface.co/allenai/longformer-large-4096/resolve/main/config.json",
"allenai/longformer-large-4096-finetuned-triviaqa": (
"https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/config.json"
),
"allenai/longformer-base-4096-extra.pos.embd.only": (
"https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/config.json"
),
"allenai/longformer-large-4096-extra.pos.embd.only": (
"https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/config.json"
),
}
class SCREAMING_SNAKE_CASE_ ( lowercase_ ):
"""simple docstring"""
A__ = "longformer"
    def __init__(self, attention_window=512, sep_token_id=2, pad_token_id=1, bos_token_id=0, eos_token_id=2, vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, onnx_export=False, **kwargs):
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.attention_window = attention_window
        self.sep_token_id = sep_token_id
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.onnx_export = onnx_export
class LongformerOnnxConfig(OnnxConfig):
    def __init__(self, config: "PretrainedConfig", task: str = "default", patching_specs: "List[PatchingSpec]" = None):
        super().__init__(config, task, patching_specs)
        config.onnx_export = True
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("global_attention_mask", dynamic_axis),
            ]
        )
    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        outputs = super().outputs
        if self.task == "default":
            outputs["pooler_output"] = {0: "batch"}
        return outputs
    @property
    def atol_for_validation(self) -> float:
        return 1e-4

    @property
    def default_onnx_opset(self) -> int:
        # needs to be >= 14 to support tril operator
        return max(super().default_onnx_opset, 14)
    def generate_dummy_inputs(self, tokenizer: "PreTrainedTokenizerBase", batch_size: int = -1, seq_length: int = -1, is_pair: bool = False, framework: Optional[TensorType] = None) -> Mapping[str, Any]:
        inputs = super().generate_dummy_inputs(
            preprocessor=tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )
        import torch

        # for some reason, replacing this code by inputs["global_attention_mask"] = torch.randint(2, inputs["input_ids"].shape, dtype=torch.int64)
        # makes the export fail randomly
        inputs["global_attention_mask"] = torch.zeros_like(inputs["input_ids"])
        # make every second token global
        inputs["global_attention_mask"][:, ::2] = 1
        return inputs
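if __name__ == "__main__":
    # Standalone illustration (a sketch, not part of the original module) of the
    # global-attention pattern built in generate_dummy_inputs above:
    import torch

    input_ids = torch.ones(2, 8, dtype=torch.long)       # dummy batch of token ids
    global_attention_mask = torch.zeros_like(input_ids)  # all-local attention to start
    global_attention_mask[:, ::2] = 1                    # every second token becomes global
    assert global_attention_mask[0].tolist() == [1, 0, 1, 0, 1, 0, 1, 0]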
| 360
| 1
|
import math
from typing import Callable, List, Optional, Union
import numpy as np
import PIL
import torch
from PIL import Image
from transformers import CLIPTextModel, CLIPTokenizer
from diffusers.models import AutoencoderKL, UNet2DConditionModel
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_upscale import StableDiffusionUpscalePipeline
from diffusers.schedulers import DDIMScheduler, DDPMScheduler, LMSDiscreteScheduler, PNDMScheduler
def make_transparency_mask(size, overlap_pixels, remove_borders=[]):
    size_x = size[0] - overlap_pixels * 2
    size_y = size[1] - overlap_pixels * 2
    for letter in ["l", "r"]:
        if letter in remove_borders:
            size_x += overlap_pixels
    for letter in ["t", "b"]:
        if letter in remove_borders:
            size_y += overlap_pixels
    mask = np.ones((size_y, size_x), dtype=np.uint8) * 255
    mask = np.pad(mask, mode="linear_ramp", pad_width=overlap_pixels, end_values=0)

    if "l" in remove_borders:
        mask = mask[:, overlap_pixels : mask.shape[1]]
    if "r" in remove_borders:
        mask = mask[:, 0 : mask.shape[1] - overlap_pixels]
    if "t" in remove_borders:
        mask = mask[overlap_pixels : mask.shape[0], :]
    if "b" in remove_borders:
        mask = mask[0 : mask.shape[0] - overlap_pixels, :]
    return mask
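# Illustration of the linear-ramp alpha mask built above (a sketch):
#   core = np.ones((2, 2), dtype=np.uint8) * 255
#   np.pad(core, mode="linear_ramp", pad_width=2, end_values=0)
# yields a 6x6 array whose values ramp from 0 at the outer border up to 255 in the
# 2x2 core; this is exactly the cross-fade used when pasting overlapping tiles.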
def clamp(n, smallest, largest):
    return max(smallest, min(n, largest))
def clamp_rect(rect: [int], min: [int], max: [int]):
    return (
        clamp(rect[0], min[0], max[0]),
        clamp(rect[1], min[1], max[1]),
        clamp(rect[2], min[0], max[0]),
        clamp(rect[3], min[1], max[1]),
    )
def add_overlap_rect(rect: [int], overlap: int, image_size: [int]):
    rect = list(rect)
    rect[0] -= overlap
    rect[1] -= overlap
    rect[2] += overlap
    rect[3] += overlap
    rect = clamp_rect(rect, [0, 0], [image_size[0], image_size[1]])
    return rect
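# Worked example: add_overlap_rect((4, 4, 8, 8), 2, (10, 10)) -> (2, 2, 10, 10);
# the rect grows by the overlap on every side and is clamped to the image bounds.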
def squeeze_tile(tile, original_image, original_slice, slice_x):
    result = Image.new("RGB", (tile.size[0] + original_slice, tile.size[1]))
    result.paste(
        original_image.resize((tile.size[0], tile.size[1]), Image.BICUBIC).crop(
            (slice_x, 0, slice_x + original_slice, tile.size[1])
        ),
        (0, 0),
    )
    result.paste(tile, (original_slice, 0))
    return result
def unsqueeze_tile(tile, original_image_slice):
    crop_rect = (original_image_slice * 4, 0, tile.size[0], tile.size[1])
    tile = tile.crop(crop_rect)
    return tile


def next_divisible(n, d):  # name assumed; this helper is not referenced elsewhere in this excerpt
    divisor = n % d
    return n - divisor
class StableDiffusionTiledUpscalePipeline(StableDiffusionUpscalePipeline):
    def __init__(
        self,
        vae: AutoencoderKL,
        text_encoder: CLIPTextModel,
        tokenizer: CLIPTokenizer,
        unet: UNet2DConditionModel,
        low_res_scheduler: DDPMScheduler,
        scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler],
        max_noise_level: int = 350,
    ):
        super().__init__(
            vae=vae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            unet=unet,
            low_res_scheduler=low_res_scheduler,
            scheduler=scheduler,
            max_noise_level=max_noise_level,
        )
    def _process_tile(self, original_image_slice, x, y, tile_size, tile_border, image, final_image, **kwargs):
        torch.manual_seed(0)
        crop_rect = (
            min(image.size[0] - (tile_size + original_image_slice), x * tile_size),
            min(image.size[1] - (tile_size + original_image_slice), y * tile_size),
            min(image.size[0], (x + 1) * tile_size),
            min(image.size[1], (y + 1) * tile_size),
        )
        crop_rect_with_overlap = add_overlap_rect(crop_rect, tile_border, image.size)
        tile = image.crop(crop_rect_with_overlap)
        translated_slice_x = ((crop_rect[0] + ((crop_rect[2] - crop_rect[0]) / 2)) / image.size[0]) * tile.size[0]
        translated_slice_x = translated_slice_x - (original_image_slice / 2)
        translated_slice_x = max(0, translated_slice_x)
        to_input = squeeze_tile(tile, image, original_image_slice, translated_slice_x)
        orig_input_size = to_input.size
        to_input = to_input.resize((tile_size, tile_size), Image.BICUBIC)
        upscaled_tile = super(StableDiffusionTiledUpscalePipeline, self).__call__(image=to_input, **kwargs).images[0]
        upscaled_tile = upscaled_tile.resize((orig_input_size[0] * 4, orig_input_size[1] * 4), Image.BICUBIC)
        upscaled_tile = unsqueeze_tile(upscaled_tile, original_image_slice)
        upscaled_tile = upscaled_tile.resize((tile.size[0] * 4, tile.size[1] * 4), Image.BICUBIC)
        remove_borders = []
        if x == 0:
            remove_borders.append("l")
        elif crop_rect[2] == image.size[0]:
            remove_borders.append("r")
        if y == 0:
            remove_borders.append("t")
        elif crop_rect[3] == image.size[1]:
            remove_borders.append("b")
        transparency_mask = Image.fromarray(
            make_transparency_mask(
                (upscaled_tile.size[0], upscaled_tile.size[1]), tile_border * 4, remove_borders=remove_borders
            ),
            mode="L",
        )
        final_image.paste(
            upscaled_tile, (crop_rect_with_overlap[0] * 4, crop_rect_with_overlap[1] * 4), transparency_mask
        )
    @torch.no_grad()
    def __call__(
        self,
        prompt: Union[str, List[str]],
        image: Union[PIL.Image.Image, List[PIL.Image.Image]],
        num_inference_steps: int = 75,
        guidance_scale: float = 9.0,
        noise_level: int = 50,
        negative_prompt: Optional[Union[str, List[str]]] = None,
        num_images_per_prompt: Optional[int] = 1,
        eta: float = 0.0,
        generator: Optional[torch.Generator] = None,
        latents: Optional[torch.FloatTensor] = None,
        callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
        callback_steps: int = 1,
        tile_size: int = 128,
        tile_border: int = 32,
        original_image_slice: int = 32,
    ):
        final_image = Image.new("RGB", (image.size[0] * 4, image.size[1] * 4))
        tcx = math.ceil(image.size[0] / tile_size)
        tcy = math.ceil(image.size[1] / tile_size)
        total_tile_count = tcx * tcy
        current_count = 0
        for y in range(tcy):
            for x in range(tcx):
                self._process_tile(
                    original_image_slice,
                    x,
                    y,
                    tile_size,
                    tile_border,
                    image,
                    final_image,
                    prompt=prompt,
                    num_inference_steps=num_inference_steps,
                    guidance_scale=guidance_scale,
                    noise_level=noise_level,
                    negative_prompt=negative_prompt,
                    num_images_per_prompt=num_images_per_prompt,
                    eta=eta,
                    generator=generator,
                    latents=latents,
                )
                current_count += 1
                if callback is not None:
                    callback({"progress": current_count / total_tile_count, "image": final_image})
        return final_image
def main():
    # Run a demo
    model_id = "stabilityai/stable-diffusion-x4-upscaler"
    pipe = StableDiffusionTiledUpscalePipeline.from_pretrained(model_id, revision="fp16", torch_dtype=torch.float16)
    pipe = pipe.to("cuda")
    image = Image.open("../../docs/source/imgs/diffusers_library.jpg")

    def callback(obj):
        print(f"progress: {obj['progress']:.4f}")
        obj["image"].save("diffusers_library_progress.jpg")

    final_image = pipe(image=image, prompt="Black font, white background, vector", noise_level=40, callback=callback)
    final_image.save("diffusers_library.jpg")


if __name__ == "__main__":
    main()
| 67
|
import os
from argparse import ArgumentParser, Namespace
from ..data import SingleSentenceClassificationProcessor as Processor
from ..pipelines import TextClassificationPipeline
from ..utils import is_tf_available, is_torch_available, logging
from . import BaseTransformersCLICommand
if not is_tf_available() and not is_torch_available():
raise RuntimeError("At least one of PyTorch or TensorFlow 2.0+ should be installed to use CLI training")
# TF training parameters
USE_XLA = False
USE_AMP = False


def train_command_factory(args: Namespace):
    """Factory function used to instantiate the training command from provided command line arguments."""
    return TrainCommand(args)
class TrainCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        train_parser = parser.add_parser("train", help="CLI tool to train a model on a task.")
        train_parser.add_argument(
            "--train_data",
            type=str,
            required=True,
            help="path to train (and optionally evaluation) dataset as a csv with tab separated labels and sentences.",
        )
        train_parser.add_argument(
            "--column_label", type=int, default=0, help="Column of the dataset csv file with example labels."
        )
        train_parser.add_argument(
            "--column_text", type=int, default=1, help="Column of the dataset csv file with example texts."
        )
        train_parser.add_argument(
            "--column_id", type=int, default=2, help="Column of the dataset csv file with example ids."
        )
        train_parser.add_argument(
            "--skip_first_row", action="store_true", help="Skip the first row of the csv file (headers)."
        )
        train_parser.add_argument("--validation_data", type=str, default="", help="path to validation dataset.")
        train_parser.add_argument(
            "--validation_split",
            type=float,
            default=0.1,
            help="if validation dataset is not provided, fraction of train dataset to use as validation dataset.",
        )
        train_parser.add_argument("--output", type=str, default="./", help="path to saved the trained model.")
        train_parser.add_argument(
            "--task", type=str, default="text_classification", help="Task to train the model on."
        )
        train_parser.add_argument(
            "--model", type=str, default="bert-base-uncased", help="Model's name or path to stored model."
        )
        train_parser.add_argument("--train_batch_size", type=int, default=32, help="Batch size for training.")
        train_parser.add_argument("--valid_batch_size", type=int, default=64, help="Batch size for validation.")
        train_parser.add_argument("--learning_rate", type=float, default=3e-5, help="Learning rate.")
        train_parser.add_argument("--adam_epsilon", type=float, default=1e-08, help="Epsilon for Adam optimizer.")
        train_parser.set_defaults(func=train_command_factory)
    def __init__(self, args: Namespace):
        self.logger = logging.get_logger("transformers-cli/training")
        self.framework = "tf" if is_tf_available() else "torch"

        os.makedirs(args.output, exist_ok=True)
        self.output = args.output

        self.column_label = args.column_label
        self.column_text = args.column_text
        self.column_id = args.column_id

        self.logger.info(f"Loading {args.task} pipeline for {args.model}")
        if args.task == "text_classification":
            self.pipeline = TextClassificationPipeline.from_pretrained(args.model)
        elif args.task == "token_classification":
            raise NotImplementedError
        elif args.task == "question_answering":
            raise NotImplementedError

        self.logger.info(f"Loading dataset from {args.train_data}")
        self.train_dataset = Processor.create_from_csv(
            args.train_data,
            column_label=args.column_label,
            column_text=args.column_text,
            column_id=args.column_id,
            skip_first_row=args.skip_first_row,
        )
        self.valid_dataset = None
        if args.validation_data:
            self.logger.info(f"Loading validation dataset from {args.validation_data}")
            self.valid_dataset = Processor.create_from_csv(
                args.validation_data,
                column_label=args.column_label,
                column_text=args.column_text,
                column_id=args.column_id,
                skip_first_row=args.skip_first_row,
            )

        self.validation_split = args.validation_split
        self.train_batch_size = args.train_batch_size
        self.valid_batch_size = args.valid_batch_size
        self.learning_rate = args.learning_rate
        self.adam_epsilon = args.adam_epsilon
    def run(self):
        if self.framework == "tf":
            return self.run_tf()
        return self.run_torch()

    def run_torch(self):
        raise NotImplementedError

    def run_tf(self):
        self.pipeline.fit(
            self.train_dataset,
            validation_data=self.valid_dataset,
            validation_split=self.validation_split,
            learning_rate=self.learning_rate,
            adam_epsilon=self.adam_epsilon,
            train_batch_size=self.train_batch_size,
            valid_batch_size=self.valid_batch_size,
        )

        # Save trained pipeline
        self.pipeline.save_pretrained(self.output)
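# Example invocation (the CSV path and output directory below are illustrative):
#   transformers-cli train --train_data ./reviews.csv --column_label 0 --column_text 1 --output ./trained_model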
| 205
| 0
|
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class BaseTransformersCLICommand(ABC):
    @staticmethod
    @abstractmethod
    def register_subcommand(parser: ArgumentParser):
        raise NotImplementedError()

    @abstractmethod
    def run(self):
        raise NotImplementedError()
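# A minimal sketch of a concrete command built on the ABC above; the command name and
# behavior here are hypothetical, for illustration only:
class HelloCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        hello_parser = parser.add_parser("hello", help="Print a greeting.")
        hello_parser.set_defaults(func=lambda args: HelloCommand())

    def run(self):
        print("hello from the CLI")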
| 710
|
from __future__ import annotations
import math
import random
from typing import Any
class MyQueue:
    def __init__(self):
        self.data = []
        self.head = 0
        self.tail = 0

    def is_empty(self):
        return self.head == self.tail

    def push(self, data):
        self.data.append(data)
        self.tail = self.tail + 1

    def pop(self):
        ret = self.data[self.head]
        self.head = self.head + 1
        return ret

    def count(self):
        return self.tail - self.head

    def print_queue(self):
        print(self.data)
        print("**************")
        print(self.data[self.head : self.tail])
class MyNode:
    def __init__(self, data):
        self.data = data
        self.left = None
        self.right = None
        self.height = 1

    def get_data(self):
        return self.data

    def get_left(self):
        return self.left

    def get_right(self):
        return self.right

    def get_height(self):
        return self.height

    def set_data(self, data):
        self.data = data

    def set_left(self, node: MyNode | None):
        self.left = node

    def set_right(self, node: MyNode | None):
        self.right = node

    def set_height(self, height: int):
        self.height = height
def get_height(node: MyNode | None) -> int:
    if node is None:
        return 0
    return node.get_height()


def my_max(a: int, b: int) -> int:
    if a > b:
        return a
    return b


def left_rotation(node: MyNode) -> MyNode:
    print("left rotation node:", node.get_data())
    ret = node.get_left()
    assert ret is not None
    node.set_left(ret.get_right())
    ret.set_right(node)
    h1 = my_max(get_height(node.get_right()), get_height(node.get_left())) + 1
    node.set_height(h1)
    h2 = my_max(get_height(ret.get_right()), get_height(ret.get_left())) + 1
    ret.set_height(h2)
    return ret


def right_rotation(node: MyNode) -> MyNode:
    print("right rotation node:", node.get_data())
    ret = node.get_right()
    assert ret is not None
    node.set_right(ret.get_left())
    ret.set_left(node)
    h1 = my_max(get_height(node.get_right()), get_height(node.get_left())) + 1
    node.set_height(h1)
    h2 = my_max(get_height(ret.get_right()), get_height(ret.get_left())) + 1
    ret.set_height(h2)
    return ret


def lr_rotation(node: MyNode) -> MyNode:
    left_child = node.get_left()
    assert left_child is not None
    node.set_left(left_rotation(left_child))
    return right_rotation(node)


def rl_rotation(node: MyNode) -> MyNode:
    right_child = node.get_right()
    assert right_child is not None
    node.set_right(right_rotation(right_child))
    return left_rotation(node)
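# Naming note (following this module's convention): left_rotation above promotes the
# *left* child, which many textbooks call a right rotation, and vice versa. Sketch:
#
#         3                  2
#        /                  / \
#       2   left_rotation  1   3
#      /        ---->
#     1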
def insert_node(node: MyNode | None, data: Any) -> MyNode | None:
    if node is None:
        return MyNode(data)
    if data < node.get_data():
        node.set_left(insert_node(node.get_left(), data))
        if (
            get_height(node.get_left()) - get_height(node.get_right()) == 2
        ):  # an unbalance detected
            left_child = node.get_left()
            assert left_child is not None
            if (
                data < left_child.get_data()
            ):  # new node is the left child of the left child
                node = right_rotation(node)
            else:
                node = lr_rotation(node)
    else:
        node.set_right(insert_node(node.get_right(), data))
        if get_height(node.get_right()) - get_height(node.get_left()) == 2:
            right_child = node.get_right()
            assert right_child is not None
            if data < right_child.get_data():
                node = rl_rotation(node)
            else:
                node = left_rotation(node)
    h = my_max(get_height(node.get_right()), get_height(node.get_left())) + 1
    node.set_height(h)
    return node


def get_right_most(root: MyNode) -> Any:
    while True:
        right_child = root.get_right()
        if right_child is None:
            break
        root = right_child
    return root.get_data()


def get_left_most(root: MyNode) -> Any:
    while True:
        left_child = root.get_left()
        if left_child is None:
            break
        root = left_child
    return root.get_data()
def del_node(root: MyNode, data: Any) -> MyNode | None:
    left_child = root.get_left()
    right_child = root.get_right()
    if root.get_data() == data:
        if left_child is not None and right_child is not None:
            temp_data = get_left_most(right_child)
            root.set_data(temp_data)
            root.set_right(del_node(right_child, temp_data))
        elif left_child is not None:
            root = left_child
        elif right_child is not None:
            root = right_child
        else:
            return None
    elif root.get_data() > data:
        if left_child is None:
            print("No such data")
            return root
        else:
            root.set_left(del_node(left_child, data))
    else:  # root.get_data() < data
        if right_child is None:
            return root
        else:
            root.set_right(del_node(right_child, data))

    if get_height(right_child) - get_height(left_child) == 2:
        assert right_child is not None
        if get_height(right_child.get_right()) > get_height(right_child.get_left()):
            root = left_rotation(root)
        else:
            root = rl_rotation(root)
    elif get_height(right_child) - get_height(left_child) == -2:
        assert left_child is not None
        if get_height(left_child.get_left()) > get_height(left_child.get_right()):
            root = right_rotation(root)
        else:
            root = lr_rotation(root)
    height = my_max(get_height(root.get_right()), get_height(root.get_left())) + 1
    root.set_height(height)
    return root
class AVLtree:
    def __init__(self):
        self.root: MyNode | None = None

    def get_height(self) -> int:
        return get_height(self.root)

    def insert(self, data: Any) -> None:
        print("insert:" + str(data))
        self.root = insert_node(self.root, data)

    def del_node(self, data: Any) -> None:
        print("delete:" + str(data))
        if self.root is None:
            print("Tree is empty!")
            return
        self.root = del_node(self.root, data)

    def __str__(self):  # a level traversal gives a more intuitive look at the tree
        output = ""
        q = MyQueue()
        q.push(self.root)
        layer = self.get_height()
        if layer == 0:
            return output
        cnt = 0
        while not q.is_empty():
            node = q.pop()
            space = " " * int(math.pow(2, layer - 1))
            output += space
            if node is None:
                output += "*"
                q.push(None)
                q.push(None)
            else:
                output += str(node.get_data())
                q.push(node.get_left())
                q.push(node.get_right())
            output += space
            cnt = cnt + 1
            for i in range(100):
                if cnt == math.pow(2, i) - 1:
                    layer = layer - 1
                    if layer == 0:
                        output += "\n*************************************"
                        return output
                    output += "\n"
                    break
        output += "\n*************************************"
        return output


def _test() -> None:
    import doctest

    doctest.testmod()
if __name__ == "__main__":
_test()
t = AVLtree()
lst = list(range(10))
random.shuffle(lst)
for i in lst:
t.insert(i)
print(str(t))
random.shuffle(lst)
for i in lst:
t.del_node(i)
print(str(t))
| 410
| 0
|
"""simple docstring"""
import logging
import random
import ray
from transformers import RagConfig, RagRetriever, RagTokenizer
from transformers.models.rag.retrieval_rag import CustomHFIndex
logger = logging.getLogger(__name__)
class RayRetriever:
    def __init__(self):
        self.initialized = False

    def create_rag_retriever(self, config, question_encoder_tokenizer, generator_tokenizer, index):
        if not self.initialized:
            self.retriever = RagRetriever(
                config,
                question_encoder_tokenizer=question_encoder_tokenizer,
                generator_tokenizer=generator_tokenizer,
                index=index,
                init_retrieval=False,
            )
            self.initialized = True

    def init_retrieval(self):
        self.retriever.index.init_index()

    def retrieve(self, question_hidden_states, n_docs):
        doc_ids, retrieved_doc_embeds = self.retriever._main_retrieve(question_hidden_states, n_docs)
        return doc_ids, retrieved_doc_embeds
class RagRayDistributedRetriever(RagRetriever):
    def __init__(self, config, question_encoder_tokenizer, generator_tokenizer, retrieval_workers, index=None):
        if index is not None and index.is_initialized() and len(retrieval_workers) > 0:
            raise ValueError(
                "When using Ray for distributed fine-tuning, "
                "you'll need to provide the paths instead, "
                "as the dataset and the index are loaded "
                "separately. More info in examples/rag/use_own_knowledge_dataset.py "
            )
        super().__init__(
            config,
            question_encoder_tokenizer=question_encoder_tokenizer,
            generator_tokenizer=generator_tokenizer,
            index=index,
            init_retrieval=False,
        )
        self.retrieval_workers = retrieval_workers
        if len(self.retrieval_workers) > 0:
            ray.get(
                [
                    worker.create_rag_retriever.remote(config, question_encoder_tokenizer, generator_tokenizer, index)
                    for worker in self.retrieval_workers
                ]
            )

    def init_retrieval(self):
        logger.info("initializing retrieval")
        if len(self.retrieval_workers) > 0:
            ray.get([worker.init_retrieval.remote() for worker in self.retrieval_workers])
        else:
            # Non-distributed training. Load index into this same process.
            self.index.init_index()

    def retrieve(self, question_hidden_states, n_docs):
        if len(self.retrieval_workers) > 0:
            # Select a random retrieval actor.
            random_worker = self.retrieval_workers[random.randint(0, len(self.retrieval_workers) - 1)]
            doc_ids, retrieved_doc_embeds = ray.get(random_worker.retrieve.remote(question_hidden_states, n_docs))
        else:
            doc_ids, retrieved_doc_embeds = self._main_retrieve(question_hidden_states, n_docs)
        return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(doc_ids)

    @classmethod
    def get_tokenizers(cls, retriever_name_or_path, indexed_dataset=None, **kwargs):
        return super(RagRayDistributedRetriever, cls).get_tokenizers(retriever_name_or_path, indexed_dataset, **kwargs)

    @classmethod
    def from_pretrained(cls, retriever_name_or_path, actor_handles, indexed_dataset=None, **kwargs):
        config = kwargs.pop("config", None) or RagConfig.from_pretrained(retriever_name_or_path, **kwargs)
        rag_tokenizer = RagTokenizer.from_pretrained(retriever_name_or_path, config=config)
        question_encoder_tokenizer = rag_tokenizer.question_encoder
        generator_tokenizer = rag_tokenizer.generator
        if indexed_dataset is not None:
            config.index_name = "custom"
            index = CustomHFIndex(config.retrieval_vector_size, indexed_dataset)
        else:
            index = cls._build_index(config)
        return cls(
            config,
            question_encoder_tokenizer=question_encoder_tokenizer,
            generator_tokenizer=generator_tokenizer,
            retrieval_workers=actor_handles,
            index=index,
        )
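# Illustrative wiring of the retriever across Ray actors (a sketch; the actor count
# and model name are assumptions, not from this module):
#   import ray
#   ray.init()
#   workers = [ray.remote(RayRetriever).remote() for _ in range(2)]
#   retriever = RagRayDistributedRetriever.from_pretrained(
#       "facebook/rag-token-nq", actor_handles=workers
#   )
#   retriever.init_retrieval()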
| 505
|
"""simple docstring"""
import argparse
import os
import shutil
from pathlib import Path
import onnx
import torch
from packaging import version
from torch.onnx import export
from diffusers import OnnxRuntimeModel, OnnxStableDiffusionPipeline, StableDiffusionPipeline
is_torch_less_than_1_11 = version.parse(version.parse(torch.__version__).base_version) < version.parse("1.11")
def onnx_export(model, model_args, output_path, ordered_input_names, output_names, dynamic_axes, opset, use_external_data_format=False):
    output_path.parent.mkdir(parents=True, exist_ok=True)
    # PyTorch deprecated the `enable_onnx_checker` and `use_external_data_format` arguments in v1.11,
    # so we check the torch version for backwards compatibility
    if is_torch_less_than_1_11:
        export(model, model_args, f=output_path.as_posix(), input_names=ordered_input_names, output_names=output_names, dynamic_axes=dynamic_axes, do_constant_folding=True, use_external_data_format=use_external_data_format, enable_onnx_checker=True, opset_version=opset)
    else:
        export(model, model_args, f=output_path.as_posix(), input_names=ordered_input_names, output_names=output_names, dynamic_axes=dynamic_axes, do_constant_folding=True, opset_version=opset)
@torch.no_grad()
def convert_models(model_path: str, output_path: str, opset: int, fp16: bool = False):
    dtype = torch.float16 if fp16 else torch.float32
    if fp16 and torch.cuda.is_available():
        device = "cuda"
    elif fp16 and not torch.cuda.is_available():
        raise ValueError("`float16` model export is only supported on GPUs with CUDA")
    else:
        device = "cpu"
    pipeline = StableDiffusionPipeline.from_pretrained(model_path, torch_dtype=dtype).to(device)
    output_path = Path(output_path)

    # TEXT ENCODER
    num_tokens = pipeline.text_encoder.config.max_position_embeddings
    text_hidden_size = pipeline.text_encoder.config.hidden_size
    text_input = pipeline.tokenizer(
        "A sample prompt", padding="max_length", max_length=pipeline.tokenizer.model_max_length, truncation=True, return_tensors="pt"
    )
    onnx_export(
        pipeline.text_encoder,
        model_args=(text_input.input_ids.to(device=device, dtype=torch.int32)),
        output_path=output_path / "text_encoder" / "model.onnx",
        ordered_input_names=["input_ids"],
        output_names=["last_hidden_state", "pooler_output"],
        dynamic_axes={"input_ids": {0: "batch", 1: "sequence"}},
        opset=opset,
    )
    del pipeline.text_encoder

    # UNET
    unet_in_channels = pipeline.unet.config.in_channels
    unet_sample_size = pipeline.unet.config.sample_size
    unet_path = output_path / "unet" / "model.onnx"
    onnx_export(
        pipeline.unet,
        model_args=(
            torch.randn(2, unet_in_channels, unet_sample_size, unet_sample_size).to(device=device, dtype=dtype),
            torch.randn(2).to(device=device, dtype=dtype),
            torch.randn(2, num_tokens, text_hidden_size).to(device=device, dtype=dtype),
            False,
        ),
        output_path=unet_path,
        ordered_input_names=["sample", "timestep", "encoder_hidden_states", "return_dict"],
        output_names=["out_sample"],
        dynamic_axes={
            "sample": {0: "batch", 1: "channels", 2: "height", 3: "width"},
            "timestep": {0: "batch"},
            "encoder_hidden_states": {0: "batch", 1: "sequence"},
        },
        opset=opset,
        use_external_data_format=True,  # UNet is > 2GB, so the weights need to be split
    )
    unet_model_path = str(unet_path.absolute().as_posix())
    unet_dir = os.path.dirname(unet_model_path)
    unet = onnx.load(unet_model_path)
    # clean up existing tensor files
    shutil.rmtree(unet_dir)
    os.mkdir(unet_dir)
    # collate external tensor files into one
    onnx.save_model(
        unet, unet_model_path, save_as_external_data=True, all_tensors_to_one_file=True, location="weights.pb", convert_attribute=False
    )
    del pipeline.unet

    # VAE ENCODER
    vae_encoder = pipeline.vae
    vae_in_channels = vae_encoder.config.in_channels
    vae_sample_size = vae_encoder.config.sample_size
    # need to get the raw tensor output (sample) from the encoder
    vae_encoder.forward = lambda sample, return_dict: vae_encoder.encode(sample, return_dict)[0].sample()
    onnx_export(
        vae_encoder,
        model_args=(torch.randn(1, vae_in_channels, vae_sample_size, vae_sample_size).to(device=device, dtype=dtype), False),
        output_path=output_path / "vae_encoder" / "model.onnx",
        ordered_input_names=["sample", "return_dict"],
        output_names=["latent_sample"],
        dynamic_axes={"sample": {0: "batch", 1: "channels", 2: "height", 3: "width"}},
        opset=opset,
    )

    # VAE DECODER
    vae_decoder = pipeline.vae
    vae_latent_channels = vae_decoder.config.latent_channels
    vae_out_channels = vae_decoder.config.out_channels
    # forward only through the decoder part
    vae_decoder.forward = vae_encoder.decode
    onnx_export(
        vae_decoder,
        model_args=(torch.randn(1, vae_latent_channels, unet_sample_size, unet_sample_size).to(device=device, dtype=dtype), False),
        output_path=output_path / "vae_decoder" / "model.onnx",
        ordered_input_names=["latent_sample", "return_dict"],
        output_names=["sample"],
        dynamic_axes={"latent_sample": {0: "batch", 1: "channels", 2: "height", 3: "width"}},
        opset=opset,
    )
    del pipeline.vae

    # SAFETY CHECKER
    if pipeline.safety_checker is not None:
        safety_checker = pipeline.safety_checker
        clip_num_channels = safety_checker.config.vision_config.num_channels
        clip_image_size = safety_checker.config.vision_config.image_size
        safety_checker.forward = safety_checker.forward_onnx
        onnx_export(
            pipeline.safety_checker,
            model_args=(
                torch.randn(1, clip_num_channels, clip_image_size, clip_image_size).to(device=device, dtype=dtype),
                torch.randn(1, vae_sample_size, vae_sample_size, vae_out_channels).to(device=device, dtype=dtype),
            ),
            output_path=output_path / "safety_checker" / "model.onnx",
            ordered_input_names=["clip_input", "images"],
            output_names=["out_images", "has_nsfw_concepts"],
            dynamic_axes={
                "clip_input": {0: "batch", 1: "channels", 2: "height", 3: "width"},
                "images": {0: "batch", 1: "height", 2: "width", 3: "channels"},
            },
            opset=opset,
        )
        del pipeline.safety_checker
        safety_checker = OnnxRuntimeModel.from_pretrained(output_path / "safety_checker")
        feature_extractor = pipeline.feature_extractor
    else:
        safety_checker = None
        feature_extractor = None

    onnx_pipeline = OnnxStableDiffusionPipeline(
        vae_encoder=OnnxRuntimeModel.from_pretrained(output_path / "vae_encoder"),
        vae_decoder=OnnxRuntimeModel.from_pretrained(output_path / "vae_decoder"),
        text_encoder=OnnxRuntimeModel.from_pretrained(output_path / "text_encoder"),
        tokenizer=pipeline.tokenizer,
        unet=OnnxRuntimeModel.from_pretrained(output_path / "unet"),
        scheduler=pipeline.scheduler,
        safety_checker=safety_checker,
        feature_extractor=feature_extractor,
        requires_safety_checker=safety_checker is not None,
    )
    onnx_pipeline.save_pretrained(output_path)
    print("ONNX pipeline saved to", output_path)

    del pipeline
    del onnx_pipeline
    _ = OnnxStableDiffusionPipeline.from_pretrained(output_path, provider="CPUExecutionProvider")
    print("ONNX pipeline is loadable")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"--model_path",
type=str,
required=True,
help="Path to the `diffusers` checkpoint to convert (either a local directory or on the Hub).",
)
parser.add_argument("--output_path", type=str, required=True, help="Path to the output model.")
parser.add_argument(
"--opset",
default=14,
type=int,
help="The version of the ONNX operator set to use.",
)
parser.add_argument("--fp16", action="store_true", default=False, help="Export the models in `float16` mode")
    args = parser.parse_args()

    convert_models(args.model_path, args.output_path, args.opset, args.fp16)
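# Example invocation (a sketch; the script filename and output path are illustrative):
#   python convert_stable_diffusion_checkpoint_to_onnx.py \
#       --model_path runwayml/stable-diffusion-v1-5 --output_path ./sd_onnx --opset 14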
| 690
| 0
|
import unittest
import numpy as np
import torch
from diffusers import ScoreSdeVePipeline, ScoreSdeVeScheduler, UNet2DModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class ScoreSdeVePipelineFastTests(unittest.TestCase):
    @property
    def dummy_uncond_unet(self):
        torch.manual_seed(0)
        model = UNet2DModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=3,
            out_channels=3,
            down_block_types=("DownBlock2D", "AttnDownBlock2D"),
            up_block_types=("AttnUpBlock2D", "UpBlock2D"),
        )
        return model

    def test_inference(self):
        unet = self.dummy_uncond_unet
        scheduler = ScoreSdeVeScheduler()

        sde_ve = ScoreSdeVePipeline(unet=unet, scheduler=scheduler)
        sde_ve.to(torch_device)
        sde_ve.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = sde_ve(num_inference_steps=2, output_type="numpy", generator=generator).images

        generator = torch.manual_seed(0)
        image_from_tuple = sde_ve(num_inference_steps=2, output_type="numpy", generator=generator, return_dict=False)[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
@slow
@require_torch
class ScoreSdeVePipelineIntegrationTests(unittest.TestCase):
    def test_inference(self):
        model_id = "google/ncsnpp-church-256"
        model = UNet2DModel.from_pretrained(model_id)
        scheduler = ScoreSdeVeScheduler.from_pretrained(model_id)

        sde_ve = ScoreSdeVePipeline(unet=model, scheduler=scheduler)
        sde_ve.to(torch_device)
        sde_ve.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = sde_ve(num_inference_steps=10, output_type="numpy", generator=generator).images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
| 206
|
import timeit
import numpy as np
import datasets
from datasets.arrow_writer import ArrowWriter
from datasets.features.features import _ArrayXD
def get_duration(func):
    """Decorator that makes the wrapped function return its own run time in seconds."""

    def wrapper(*args, **kwargs):
        starttime = timeit.default_timer()
        _ = func(*args, **kwargs)
        delta = timeit.default_timer() - starttime
        return delta

    wrapper.__name__ = func.__name__
    return wrapper
def generate_examples(features: dict, num_examples=100, seq_shapes=None):
    dummy_data = []
    seq_shapes = seq_shapes or {}
    for i in range(num_examples):
        example = {}
        for col_id, (k, v) in enumerate(features.items()):
            if isinstance(v, _ArrayXD):
                example[k] = np.random.rand(*v.shape).astype(v.dtype)
            elif isinstance(v, datasets.Value):
                if v.dtype == "string":
                    example[k] = "The small grey turtle was surprisingly fast when challenged."
                else:
                    example[k] = np.random.randint(10, size=1).astype(v.dtype).item()
            elif isinstance(v, datasets.Sequence):
                while isinstance(v, datasets.Sequence):
                    v = v.feature
                shape = seq_shapes[k]
                example[k] = np.random.rand(*shape).astype(v.dtype)
        dummy_data.append((i, example))

    return dummy_data
def generate_example_dataset(dataset_path, features, num_examples=100, seq_shapes=None):
    dummy_data = generate_examples(features, num_examples=num_examples, seq_shapes=seq_shapes)

    with ArrowWriter(features=features, path=dataset_path) as writer:
        for key, record in dummy_data:
            example = features.encode_example(record)
            writer.write(example)

        num_final_examples, num_bytes = writer.finalize()

    if not num_final_examples == num_examples:
        raise ValueError(
            f"Error writing the dataset, wrote {num_final_examples} examples but should have written {num_examples}."
        )

    dataset = datasets.Dataset.from_file(filename=dataset_path, info=datasets.DatasetInfo(features=features))
    return dataset
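# Illustrative usage (the feature spec here is an assumption for demonstration):
#   features = datasets.Features({"text": datasets.Value("string"), "label": datasets.Value("int32")})
#   dataset = generate_example_dataset("/tmp/dummy.arrow", features, num_examples=100)
#   print(len(dataset))  # 100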
| 206
| 1
|
from dataclasses import dataclass
from typing import Dict, Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .attention_processor import AttentionProcessor, AttnProcessor
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, DiagonalGaussianDistribution, Encoder
@dataclass
class AutoencoderKLOutput(BaseOutput):
    latent_dist: "DiagonalGaussianDistribution"


class AutoencoderKL(ModelMixin, ConfigMixin):
    _supports_gradient_checkpointing = True

    @register_to_config
    def __init__(self, in_channels: int = 3, out_channels: int = 3, down_block_types: Tuple[str] = ("DownEncoderBlock2D",), up_block_types: Tuple[str] = ("UpDecoderBlock2D",), block_out_channels: Tuple[int] = (64,), layers_per_block: int = 1, act_fn: str = "silu", latent_channels: int = 4, norm_num_groups: int = 32, sample_size: int = 32, scaling_factor: float = 0.18215):
        super().__init__()

        # pass init params to Encoder
        self.encoder = Encoder(in_channels=in_channels, out_channels=latent_channels, down_block_types=down_block_types, block_out_channels=block_out_channels, layers_per_block=layers_per_block, act_fn=act_fn, norm_num_groups=norm_num_groups, double_z=True)

        # pass init params to Decoder
        self.decoder = Decoder(in_channels=latent_channels, out_channels=out_channels, up_block_types=up_block_types, block_out_channels=block_out_channels, layers_per_block=layers_per_block, norm_num_groups=norm_num_groups, act_fn=act_fn)

        self.quant_conv = nn.Conv2d(2 * latent_channels, 2 * latent_channels, 1)
        self.post_quant_conv = nn.Conv2d(latent_channels, latent_channels, 1)

        self.use_slicing = False
        self.use_tiling = False

        # only relevant if vae tiling is enabled
        self.tile_sample_min_size = self.config.sample_size
        sample_size = (
            self.config.sample_size[0]
            if isinstance(self.config.sample_size, (list, tuple))
            else self.config.sample_size
        )
        self.tile_latent_min_size = int(sample_size / (2 ** (len(self.config.block_out_channels) - 1)))
        self.tile_overlap_factor = 0.25
    def _set_gradient_checkpointing(self, module, value=False):
        if isinstance(module, (Encoder, Decoder)):
            module.gradient_checkpointing = value

    def enable_tiling(self, use_tiling: bool = True):
        self.use_tiling = use_tiling

    def disable_tiling(self):
        self.enable_tiling(False)

    def enable_slicing(self):
        self.use_slicing = True

    def disable_slicing(self):
        self.use_slicing = False
    @property
    # Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors
    def attn_processors(self) -> Dict[str, AttentionProcessor]:
        processors = {}

        def fn_recursive_add_processors(name: str, module: torch.nn.Module, processors: Dict[str, AttentionProcessor]):
            if hasattr(module, "set_processor"):
                processors[f"{name}.processor"] = module.processor

            for sub_name, child in module.named_children():
                fn_recursive_add_processors(f"{name}.{sub_name}", child, processors)

            return processors

        for name, module in self.named_children():
            fn_recursive_add_processors(name, module, processors)

        return processors
    def set_attn_processor(self, processor: Union[AttentionProcessor, Dict[str, AttentionProcessor]]):
        count = len(self.attn_processors.keys())

        if isinstance(processor, dict) and len(processor) != count:
            raise ValueError(
                f"A dict of processors was passed, but the number of processors {len(processor)} does not match the"
                f" number of attention layers: {count}. Please make sure to pass {count} processor classes."
            )

        def fn_recursive_attn_processor(name: str, module: torch.nn.Module, processor):
            if hasattr(module, "set_processor"):
                if not isinstance(processor, dict):
                    module.set_processor(processor)
                else:
                    module.set_processor(processor.pop(f"{name}.processor"))

            for sub_name, child in module.named_children():
                fn_recursive_attn_processor(f"{name}.{sub_name}", child, processor)

        for name, module in self.named_children():
            fn_recursive_attn_processor(name, module, processor)

    def set_default_attn_processor(self):
        self.set_attn_processor(AttnProcessor())
    @apply_forward_hook
    def encode(self, x: torch.FloatTensor, return_dict: bool = True) -> AutoencoderKLOutput:
        if self.use_tiling and (x.shape[-1] > self.tile_sample_min_size or x.shape[-2] > self.tile_sample_min_size):
            return self.tiled_encode(x, return_dict=return_dict)

        if self.use_slicing and x.shape[0] > 1:
            encoded_slices = [self.encoder(x_slice) for x_slice in x.split(1)]
            h = torch.cat(encoded_slices)
        else:
            h = self.encoder(x)

        moments = self.quant_conv(h)
        posterior = DiagonalGaussianDistribution(moments)

        if not return_dict:
            return (posterior,)

        return AutoencoderKLOutput(latent_dist=posterior)

    def _decode(self, z: torch.FloatTensor, return_dict: bool = True) -> Union[DecoderOutput, torch.FloatTensor]:
        if self.use_tiling and (z.shape[-1] > self.tile_latent_min_size or z.shape[-2] > self.tile_latent_min_size):
            return self.tiled_decode(z, return_dict=return_dict)

        z = self.post_quant_conv(z)
        dec = self.decoder(z)

        if not return_dict:
            return (dec,)

        return DecoderOutput(sample=dec)

    @apply_forward_hook
    def decode(self, z: torch.FloatTensor, return_dict: bool = True) -> Union[DecoderOutput, torch.FloatTensor]:
        if self.use_slicing and z.shape[0] > 1:
            decoded_slices = [self._decode(z_slice).sample for z_slice in z.split(1)]
            decoded = torch.cat(decoded_slices)
        else:
            decoded = self._decode(z).sample

        if not return_dict:
            return (decoded,)

        return DecoderOutput(sample=decoded)
    def blend_v(self, a, b, blend_extent):
        blend_extent = min(a.shape[2], b.shape[2], blend_extent)
        for y in range(blend_extent):
            b[:, :, y, :] = a[:, :, -blend_extent + y, :] * (1 - y / blend_extent) + b[:, :, y, :] * (y / blend_extent)
        return b

    def blend_h(self, a, b, blend_extent):
        blend_extent = min(a.shape[3], b.shape[3], blend_extent)
        for x in range(blend_extent):
            b[:, :, :, x] = a[:, :, :, -blend_extent + x] * (1 - x / blend_extent) + b[:, :, :, x] * (x / blend_extent)
        return b
    def tiled_encode(self, x: torch.FloatTensor, return_dict: bool = True) -> AutoencoderKLOutput:
        overlap_size = int(self.tile_sample_min_size * (1 - self.tile_overlap_factor))
        blend_extent = int(self.tile_latent_min_size * self.tile_overlap_factor)
        row_limit = self.tile_latent_min_size - blend_extent

        # Split the image into 512x512 tiles and encode them separately.
        rows = []
        for i in range(0, x.shape[2], overlap_size):
            row = []
            for j in range(0, x.shape[3], overlap_size):
                tile = x[:, :, i : i + self.tile_sample_min_size, j : j + self.tile_sample_min_size]
                tile = self.encoder(tile)
                tile = self.quant_conv(tile)
                row.append(tile)
            rows.append(row)
        result_rows = []
        for i, row in enumerate(rows):
            result_row = []
            for j, tile in enumerate(row):
                # blend the above tile and the left tile
                # to the current tile and add the current tile to the result row
                if i > 0:
                    tile = self.blend_v(rows[i - 1][j], tile, blend_extent)
                if j > 0:
                    tile = self.blend_h(row[j - 1], tile, blend_extent)
                result_row.append(tile[:, :, :row_limit, :row_limit])
            result_rows.append(torch.cat(result_row, dim=3))

        moments = torch.cat(result_rows, dim=2)
        posterior = DiagonalGaussianDistribution(moments)

        if not return_dict:
            return (posterior,)

        return AutoencoderKLOutput(latent_dist=posterior)
    def tiled_decode(self, z: torch.FloatTensor, return_dict: bool = True) -> Union[DecoderOutput, torch.FloatTensor]:
        overlap_size = int(self.tile_latent_min_size * (1 - self.tile_overlap_factor))
        blend_extent = int(self.tile_sample_min_size * self.tile_overlap_factor)
        row_limit = self.tile_sample_min_size - blend_extent

        # Split z into overlapping 64x64 tiles and decode them separately.
        # The tiles have an overlap to avoid seams between tiles.
        rows = []
        for i in range(0, z.shape[2], overlap_size):
            row = []
            for j in range(0, z.shape[3], overlap_size):
                tile = z[:, :, i : i + self.tile_latent_min_size, j : j + self.tile_latent_min_size]
                tile = self.post_quant_conv(tile)
                decoded = self.decoder(tile)
                row.append(decoded)
            rows.append(row)
        result_rows = []
        for i, row in enumerate(rows):
            result_row = []
            for j, tile in enumerate(row):
                # blend the above tile and the left tile
                # to the current tile and add the current tile to the result row
                if i > 0:
                    tile = self.blend_v(rows[i - 1][j], tile, blend_extent)
                if j > 0:
                    tile = self.blend_h(row[j - 1], tile, blend_extent)
                result_row.append(tile[:, :, :row_limit, :row_limit])
            result_rows.append(torch.cat(result_row, dim=3))

        dec = torch.cat(result_rows, dim=2)
        if not return_dict:
            return (dec,)

        return DecoderOutput(sample=dec)
    def forward(self, sample: torch.FloatTensor, sample_posterior: bool = False, return_dict: bool = True, generator: Optional[torch.Generator] = None) -> Union[DecoderOutput, torch.FloatTensor]:
        x = sample
        posterior = self.encode(x).latent_dist
        if sample_posterior:
            z = posterior.sample(generator=generator)
        else:
            z = posterior.mode()
        dec = self.decode(z).sample

        if not return_dict:
            return (dec,)

        return DecoderOutput(sample=dec)
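# Minimal round-trip sketch (shapes illustrative; the default config is a tiny VAE):
#   vae = AutoencoderKL(sample_size=32)
#   x = torch.randn(1, 3, 32, 32)
#   posterior = vae.encode(x).latent_dist
#   recon = vae.decode(posterior.sample()).sample  # shape (1, 3, 32, 32)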
| 57
|
def jaro_winkler(str1: str, str2: str) -> float:
    def get_matched_characters(_str1: str, _str2: str) -> str:
        matched = []
        limit = min(len(_str1), len(_str2)) // 2
        for i, l in enumerate(_str1):
            left = int(max(0, i - limit))
            right = int(min(i + limit + 1, len(_str2)))
            if l in _str2[left:right]:
                matched.append(l)
                _str2 = f"{_str2[0:_str2.index(l)]} {_str2[_str2.index(l) + 1:]}"

        return "".join(matched)

    # matching characters
    matching_1 = get_matched_characters(str1, str2)
    matching_2 = get_matched_characters(str2, str1)
    match_count = len(matching_1)

    # transposition
    transpositions = (
        len([(c1, c2) for c1, c2 in zip(matching_1, matching_2) if c1 != c2]) // 2
    )

    if not match_count:
        jaro = 0.0
    else:
        jaro = (
            1
            / 3
            * (
                match_count / len(str1)
                + match_count / len(str2)
                + (match_count - transpositions) / match_count
            )
        )

    # common prefix up to 4 characters
    prefix_len = 0
    for c1, c2 in zip(str1[:4], str2[:4]):
        if c1 == c2:
            prefix_len += 1
        else:
            break

    return jaro + 0.1 * prefix_len * (1 - jaro)
if __name__ == "__main__":
import doctest
doctest.testmod()
print(jaro_winkler("hello", "world"))
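# Worked example: for ("hello", "world") only "l" matches in both directions, so
# jaro = (1/5 + 1/5 + 1/1) / 3 ~= 0.4667; there is no common prefix, so the final
# Jaro-Winkler score stays at 0.4666666666666666.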
| 666
| 0
|
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
UpperCAmelCase_ : int = {"configuration_mmbt": ["MMBTConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ : List[Any] = ["MMBTForClassification", "MMBTModel", "ModalEmbeddings"]
if TYPE_CHECKING:
from .configuration_mmbt import MMBTConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mmbt import MMBTForClassification, MMBTModel, ModalEmbeddings
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 702
|
DOOMSDAY_LEAP = [4, 1, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
DOOMSDAY_NOT_LEAP = [3, 7, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
WEEK_DAY_NAMES = {
0: "Sunday",
1: "Monday",
2: "Tuesday",
3: "Wednesday",
4: "Thursday",
5: "Friday",
6: "Saturday",
}
def get_week_day(year: int, month: int, day: int) -> str:
    """Returns the week-day name for a given date, via the Doomsday algorithm."""
    assert len(str(year)) > 2, "year should be in YYYY format"
    assert 1 <= month <= 12, "month should be between 1 to 12"
    assert 1 <= day <= 31, "day should be between 1 to 31"

    # Doomsday algorithm:
    century = year // 100
    century_anchor = (5 * (century % 4) + 2) % 7
    centurian = year % 100
    centurian_m = centurian % 12
    dooms_day = (
        (centurian // 12) + centurian_m + (centurian_m // 4) + century_anchor
    ) % 7
    day_anchor = (
        DOOMSDAY_NOT_LEAP[month - 1]
        if (year % 4 != 0) or (centurian == 0 and (year % 400) != 0)
        else DOOMSDAY_LEAP[month - 1]
    )
    week_day = (dooms_day + day - day_anchor) % 7
    return WEEK_DAY_NAMES[week_day]
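# Worked example: 2020-10-24 -> century 20, century_anchor (5*(20%4)+2)%7 = 2;
# centurian 20, so dooms_day = (1 + 8 + 2 + 2) % 7 = 6; 2020 is a leap year, so
# day_anchor = DOOMSDAY_LEAP[9] = 3; week_day = (6 + 24 - 3) % 7 = 6 -> "Saturday".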
if __name__ == "__main__":
import doctest
doctest.testmod()
| 232
| 0
|
"""simple docstring"""
def find_minimum_change(denominations: list[int], value: str) -> list[int]:
    total_value = int(value)

    # Initialize Result
    answer = []

    # Traverse through all denomination
    for denomination in reversed(denominations):
        # Find denominations
        while int(total_value) >= int(denomination):
            total_value -= int(denomination)
            answer.append(denomination)  # Append the "answers" array

    return answer
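# Worked example: find_minimum_change([1, 2, 5, 10], "18") -> [10, 5, 2, 1];
# the greedy pass takes the largest denomination that still fits at every step.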
# Driver Code
if __name__ == "__main__":
    denominations = []
    value = "0"

    if (
        input("Do you want to enter your denominations ? (yY/n): ").strip().lower()
        == "y"
    ):
        n = int(input("Enter the number of denominations you want to add: ").strip())

        for i in range(0, n):
            denominations.append(int(input(f"Denomination {i}: ").strip()))
        value = input("Enter the change you want to make in Indian Currency: ").strip()
    else:
        # All denominations of Indian Currency if user does not enter
        denominations = [1, 2, 5, 10, 20, 50, 100, 500, 2000]
        value = input("Enter the change you want to make: ").strip()

    if int(value) == 0 or int(value) < 0:
        print("The total value cannot be zero or negative.")
    else:
        print(f"Following is minimal change for {value}: ")
        answer = find_minimum_change(denominations, value)
        # Print result
        for i in range(len(answer)):
            print(answer[i], end=" ")
| 49
|
"""simple docstring"""
from collections import deque
class Process:
    def __init__(self, process_name: str, arrival_time: int, burst_time: int) -> None:
        self.process_name = process_name  # process name
        self.arrival_time = arrival_time  # arrival time of the process
        # completion time of finished process or last interrupted time
        self.stop_time = arrival_time
        self.burst_time = burst_time  # remaining burst time
        self.waiting_time = 0  # total time of the process wait in ready queue
        self.turnaround_time = 0  # time from arrival time to completion time
class MLFQ:
    def __init__(
        self,
        number_of_queues: int,
        time_slices: list[int],
        queue: deque[Process],
        current_time: int,
    ) -> None:
        # total number of mlfq's queues
        self.number_of_queues = number_of_queues
        # time slices of queues that round robin algorithm applied
        self.time_slices = time_slices
        # unfinished process is in this ready_queue
        self.ready_queue = queue
        # current time
        self.current_time = current_time
        # finished process is in this sequence queue
        self.finish_queue: deque[Process] = deque()

    def calculate_sequence_of_finish_queue(self) -> list[str]:
        sequence = []
        for i in range(len(self.finish_queue)):
            sequence.append(self.finish_queue[i].process_name)
        return sequence

    def calculate_waiting_time(self, queue: list[Process]) -> list[int]:
        waiting_times = []
        for i in range(len(queue)):
            waiting_times.append(queue[i].waiting_time)
        return waiting_times

    def calculate_turnaround_time(self, queue: list[Process]) -> list[int]:
        turnaround_times = []
        for i in range(len(queue)):
            turnaround_times.append(queue[i].turnaround_time)
        return turnaround_times

    def calculate_completion_time(self, queue: list[Process]) -> list[int]:
        completion_times = []
        for i in range(len(queue)):
            completion_times.append(queue[i].stop_time)
        return completion_times

    def calculate_remaining_burst_time_of_processes(self, queue: deque[Process]) -> list[int]:
        return [q.burst_time for q in queue]

    def update_waiting_time(self, process: Process) -> int:
        process.waiting_time += self.current_time - process.stop_time
        return process.waiting_time
    def first_come_first_served(self, ready_queue: deque[Process]) -> deque[Process]:
        finished: deque[Process] = deque()  # sequence deque of finished process
        while len(ready_queue) != 0:
            cp = ready_queue.popleft()  # current process

            # if process's arrival time is later than current time, update current time
            if self.current_time < cp.arrival_time:
                self.current_time += cp.arrival_time

            # update waiting time of current process
            self.update_waiting_time(cp)
            # update current time
            self.current_time += cp.burst_time
            # finish the process and set the process's burst-time 0
            cp.burst_time = 0
            # set the process's turnaround time because it is finished
            cp.turnaround_time = self.current_time - cp.arrival_time
            # set the completion time
            cp.stop_time = self.current_time
            # add the process to queue that has finished queue
            finished.append(cp)

        self.finish_queue.extend(finished)  # add finished process to finish queue
        # FCFS will finish all remaining processes
        return finished
    def round_robin(self, ready_queue: deque[Process], time_slice: int) -> tuple[deque[Process], deque[Process]]:
        finished: deque[Process] = deque()  # sequence deque of terminated process
        # just for 1 cycle and unfinished processes will go back to queue
        for _ in range(len(ready_queue)):
            cp = ready_queue.popleft()  # current process

            # if process's arrival time is later than current time, update current time
            if self.current_time < cp.arrival_time:
                self.current_time += cp.arrival_time

            # update waiting time of unfinished processes
            self.update_waiting_time(cp)
            # if the burst time of process is bigger than time-slice
            if cp.burst_time > time_slice:
                # use CPU for only time-slice
                self.current_time += time_slice
                # update remaining burst time
                cp.burst_time -= time_slice
                # update end point time
                cp.stop_time = self.current_time
                # locate the process behind the queue because it is not finished
                ready_queue.append(cp)
            else:
                # use CPU for remaining burst time
                self.current_time += cp.burst_time
                # set burst time 0 because the process is finished
                cp.burst_time = 0
                # set the finish time
                cp.stop_time = self.current_time
                # update the process' turnaround time because it is finished
                cp.turnaround_time = self.current_time - cp.arrival_time
                # add the process to queue that has finished queue
                finished.append(cp)

        self.finish_queue.extend(finished)  # add finished process to finish queue
        # return finished processes queue and remaining processes queue
        return finished, ready_queue
    def multi_level_feedback_queue(self) -> deque[Process]:
        # all queues except last one have round robin algorithm
        for i in range(self.number_of_queues - 1):
            finished, self.ready_queue = self.round_robin(
                self.ready_queue, self.time_slices[i]
            )
        # the last queue has first_come_first_served algorithm
        self.first_come_first_served(self.ready_queue)
        return self.finish_queue
if __name__ == "__main__":
    import doctest

    P1 = Process('P1', 0, 53)
    P2 = Process('P2', 0, 17)
    P3 = Process('P3', 0, 68)
    P4 = Process('P4', 0, 24)
    number_of_queues = 3
    time_slices = [17, 25]
    queue = deque([P1, P2, P3, P4])

    if len(time_slices) != number_of_queues - 1:
        raise SystemExit(0)

    doctest.testmod(extraglobs={'queue': deque([P1, P2, P3, P4])})

    P1 = Process('P1', 0, 53)
    P2 = Process('P2', 0, 17)
    P3 = Process('P3', 0, 68)
    P4 = Process('P4', 0, 24)
    number_of_queues = 3
    time_slices = [17, 25]
    queue = deque([P1, P2, P3, P4])
    mlfq = MLFQ(number_of_queues, time_slices, queue, 0)
    finish_queue = mlfq.multi_level_feedback_queue()
# print total waiting times of processes(P1, P2, P3, P4)
    print(
        f"""waiting time:\
        \t\t\t{MLFQ.calculate_waiting_time(mlfq, [P1, P2, P3, P4])}"""
    )
    # print completion times of processes(P1, P2, P3, P4)
    print(
        f"""completion time:\
        \t\t{MLFQ.calculate_completion_time(mlfq, [P1, P2, P3, P4])}"""
    )
    # print total turnaround times of processes(P1, P2, P3, P4)
    print(
        f"""turnaround time:\
        \t\t{MLFQ.calculate_turnaround_time(mlfq, [P1, P2, P3, P4])}"""
    )
    # print sequence of finished processes
    print(
        f"""sequence of finished processes:\
        {mlfq.calculate_sequence_of_finish_queue()}"""
    )
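    # Worked trace for P1 (burst time 53) under time_slices=[17, 25], with all
    # processes arriving at t=0: queue 1 (round robin, slice 17) runs P1 for 17
    # units leaving 36; queue 2 (slice 25) runs it for 25 leaving 11; the final
    # FCFS queue then runs the remaining 11 units to completion.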
| 49
| 1
|
import sys
import warnings
from os.path import abspath, dirname, join
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
git_repo_path = abspath(join(dirname(dirname(__file__)), "src"))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action="""ignore""", category=FutureWarning)
def pytest_addoption(parser):
    from diffusers.utils.testing_utils import pytest_addoption_shared

    pytest_addoption_shared(parser)


def pytest_terminal_summary(terminalreporter):
    from diffusers.utils.testing_utils import pytest_terminal_summary_main

    make_reports = terminalreporter.config.getoption("--make-reports")
    if make_reports:
        pytest_terminal_summary_main(terminalreporter, id=make_reports)
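# Usage note (illustrative): pytest discovers the two hooks above by name, so
# running e.g. `pytest --make-reports=tests tests/` would trigger
# `pytest_terminal_summary_main` and write report files for the run.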
| 719
|
from __future__ import annotations
import copy
import inspect
import json
import math
import os
import tempfile
import unittest
from importlib import import_module
import numpy as np
from transformers import ViTMAEConfig
from transformers.file_utils import cached_property, is_tf_available, is_vision_available
from transformers.testing_utils import require_tf, require_vision, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFViTMAEForPreTraining, TFViTMAEModel
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class TFViTMAEModelTester:
    def __init__(self, parent, batch_size=13, image_size=30, patch_size=2, num_channels=3, is_training=True, use_labels=True, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, type_sequence_label_size=10, initializer_range=0.02, num_labels=3, mask_ratio=0.6, scope=None):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.mask_ratio = mask_ratio
        self.scope = scope

        # in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above
        # (we add 1 for the [CLS] token)
        self.num_patches = (image_size // patch_size) ** 2
        self.seq_length = int(math.ceil((1 - mask_ratio) * (self.num_patches + 1)))
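        # Sanity check with the defaults above: num_patches = (30 // 2) ** 2 = 225,
        # so seq_length = ceil((1 - 0.6) * (225 + 1)) = ceil(90.4) = 91.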
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels
    def get_config(self):
        return ViTMAEConfig(
            image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, decoder_hidden_size=self.hidden_size, decoder_num_hidden_layers=self.num_hidden_layers, decoder_num_attention_heads=self.num_attention_heads, decoder_intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=False, initializer_range=self.initializer_range, mask_ratio=self.mask_ratio,
        )
    def create_and_check_model(self, config, pixel_values, labels):
        model = TFViTMAEModel(config=config)
        result = model(pixel_values, training=False)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_for_pretraining(self, config, pixel_values, labels):
        model = TFViTMAEForPreTraining(config)
        result = model(pixel_values, training=False)
        # expected sequence length = num_patches
        num_patches = (self.image_size // self.patch_size) ** 2
        expected_num_channels = self.patch_size**2 * self.num_channels
        self.parent.assertEqual(result.logits.shape, (self.batch_size, num_patches, expected_num_channels))

        # test greyscale images
        config.num_channels = 1
        model = TFViTMAEForPreTraining(config)
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values, training=False)
        expected_num_channels = self.patch_size**2
        self.parent.assertEqual(result.logits.shape, (self.batch_size, num_patches, expected_num_channels))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, pixel_values, labels) = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_tf
class TFViTMAEModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFViTMAEModel, TFViTMAEForPreTraining) if is_tf_available() else ()
    pipeline_model_mapping = {"feature-extraction": TFViTMAEModel} if is_tf_available() else {}

    test_pruning = False
    test_onnx = False
    test_resize_embeddings = False
    test_head_masking = False
    def setUp(self):
        self.model_tester = TFViTMAEModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTMAEConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()
@unittest.skip(reason="ViTMAE does not use inputs_embeds" )
def __A ( self : Dict ):
'''simple docstring'''
pass
def __A ( self : Optional[Any] ):
'''simple docstring'''
UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase_ = model_class(lowerCAmelCase )
self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer) )
UpperCAmelCase_ = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(lowerCAmelCase , tf.keras.layers.Layer ) )
def __A ( self : Optional[Any] ):
'''simple docstring'''
UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase_ = model_class(lowerCAmelCase )
UpperCAmelCase_ = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase_ = [*signature.parameters.keys()]
UpperCAmelCase_ = ["pixel_values"]
self.assertListEqual(arg_names[:1] , lowerCAmelCase )
def __A ( self : Optional[int] ):
'''simple docstring'''
UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCAmelCase )
def __A ( self : Tuple ):
'''simple docstring'''
UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*lowerCAmelCase )
def __A ( self : str ):
'''simple docstring'''
np.random.seed(2 )
UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase_ = int((config.image_size // config.patch_size) ** 2 )
UpperCAmelCase_ = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
for model_class in self.all_model_classes:
UpperCAmelCase_ = model_class(lowerCAmelCase )
UpperCAmelCase_ = self._prepare_for_class(lowerCAmelCase , lowerCAmelCase )
UpperCAmelCase_ = model(lowerCAmelCase , noise=lowerCAmelCase )
UpperCAmelCase_ = copy.deepcopy(self._prepare_for_class(lowerCAmelCase , lowerCAmelCase ) )
UpperCAmelCase_ = model(**lowerCAmelCase , noise=lowerCAmelCase )
UpperCAmelCase_ = outputs_dict[0].numpy()
UpperCAmelCase_ = outputs_keywords[0].numpy()
self.assertLess(np.sum(np.abs(output_dict - output_keywords ) ) , 1e-6 )
def __A ( self : int ):
'''simple docstring'''
np.random.seed(2 )
UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase_ = int((config.image_size // config.patch_size) ** 2 )
UpperCAmelCase_ = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
def prepare_numpy_arrays(lowerCAmelCase : Dict ):
UpperCAmelCase_ = {}
for k, v in inputs_dict.items():
if tf.is_tensor(lowerCAmelCase ):
UpperCAmelCase_ = v.numpy()
else:
UpperCAmelCase_ = np.array(lowerCAmelCase )
return inputs_np_dict
for model_class in self.all_model_classes:
UpperCAmelCase_ = model_class(lowerCAmelCase )
UpperCAmelCase_ = self._prepare_for_class(lowerCAmelCase , lowerCAmelCase )
UpperCAmelCase_ = prepare_numpy_arrays(lowerCAmelCase )
UpperCAmelCase_ = model(lowerCAmelCase , noise=lowerCAmelCase )
UpperCAmelCase_ = model(**lowerCAmelCase , noise=lowerCAmelCase )
self.assert_outputs_same(lowerCAmelCase , lowerCAmelCase )
def __A ( self : Tuple , lowerCAmelCase : List[str] , lowerCAmelCase : Dict , lowerCAmelCase : Tuple ):
'''simple docstring'''
np.random.seed(2 )
UpperCAmelCase_ = int((tf_model.config.image_size // tf_model.config.patch_size) ** 2 )
UpperCAmelCase_ = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
UpperCAmelCase_ = tf.constant(lowerCAmelCase )
# Add `noise` argument.
# PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument
UpperCAmelCase_ = tf_noise
super().check_pt_tf_models(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
def __A ( self : Tuple ):
'''simple docstring'''
np.random.seed(2 )
UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase_ = {
module_member
for model_class in self.all_model_classes
for module in (import_module(model_class.__module__ ),)
for module_member_name in dir(lowerCAmelCase )
if module_member_name.endswith("MainLayer" )
# This condition is required, since `modeling_tf_clip.py` has 3 classes whose names end with `MainLayer`.
and module_member_name[: -len("MainLayer" )] == model_class.__name__[: -len("Model" )]
for module_member in (getattr(lowerCAmelCase , lowerCAmelCase ),)
if isinstance(lowerCAmelCase , lowerCAmelCase )
and tf.keras.layers.Layer in module_member.__bases__
and getattr(lowerCAmelCase , "_keras_serializable" , lowerCAmelCase )
}
UpperCAmelCase_ = int((config.image_size // config.patch_size) ** 2 )
UpperCAmelCase_ = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
UpperCAmelCase_ = tf.convert_to_tensor(lowerCAmelCase )
inputs_dict.update({"noise": noise} )
for main_layer_class in tf_main_layer_classes:
UpperCAmelCase_ = main_layer_class(lowerCAmelCase )
UpperCAmelCase_ = {
name: tf.keras.Input(tensor.shape[1:] , dtype=tensor.dtype ) for name, tensor in inputs_dict.items()
}
UpperCAmelCase_ = tf.keras.Model(lowerCAmelCase , outputs=main_layer(lowerCAmelCase ) )
UpperCAmelCase_ = model(lowerCAmelCase )
with tempfile.TemporaryDirectory() as tmpdirname:
UpperCAmelCase_ = os.path.join(lowerCAmelCase , "keras_model.h5" )
model.save(lowerCAmelCase )
UpperCAmelCase_ = tf.keras.models.load_model(
lowerCAmelCase , custom_objects={main_layer_class.__name__: main_layer_class} )
assert isinstance(lowerCAmelCase , tf.keras.Model )
UpperCAmelCase_ = model(lowerCAmelCase )
self.assert_outputs_same(lowerCAmelCase , lowerCAmelCase )
@slow
def __A ( self : Any ):
'''simple docstring'''
np.random.seed(2 )
UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase_ = int((config.image_size // config.patch_size) ** 2 )
UpperCAmelCase_ = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
for model_class in self.all_model_classes:
UpperCAmelCase_ = model_class(lowerCAmelCase )
UpperCAmelCase_ = self._prepare_for_class(lowerCAmelCase , lowerCAmelCase )
UpperCAmelCase_ = model(lowerCAmelCase , noise=lowerCAmelCase )
if model_class.__name__ == "TFViTMAEModel":
UpperCAmelCase_ = outputs.last_hidden_state.numpy()
UpperCAmelCase_ = 0
else:
UpperCAmelCase_ = outputs.logits.numpy()
UpperCAmelCase_ = 0
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(lowerCAmelCase , saved_model=lowerCAmelCase )
UpperCAmelCase_ = model_class.from_pretrained(lowerCAmelCase )
UpperCAmelCase_ = model(lowerCAmelCase , noise=lowerCAmelCase )
if model_class.__name__ == "TFViTMAEModel":
UpperCAmelCase_ = after_outputs["last_hidden_state"].numpy()
UpperCAmelCase_ = 0
else:
UpperCAmelCase_ = after_outputs["logits"].numpy()
UpperCAmelCase_ = 0
UpperCAmelCase_ = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(lowerCAmelCase , 1e-5 )
def __A ( self : str ):
'''simple docstring'''
np.random.seed(2 )
UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase_ = int((config.image_size // config.patch_size) ** 2 )
UpperCAmelCase_ = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
for model_class in self.all_model_classes:
UpperCAmelCase_ = model_class(lowerCAmelCase )
UpperCAmelCase_ = self._prepare_for_class(lowerCAmelCase , lowerCAmelCase )
UpperCAmelCase_ = model(lowerCAmelCase , noise=lowerCAmelCase )
UpperCAmelCase_ = model.get_config()
# make sure that returned config is jsonifiable, which is required by keras
json.dumps(lowerCAmelCase )
UpperCAmelCase_ = model_class.from_config(model.get_config() )
# make sure it also accepts a normal config
UpperCAmelCase_ = model_class.from_config(model.config )
UpperCAmelCase_ = new_model(lowerCAmelCase ) # Build model
new_model.set_weights(model.get_weights() )
UpperCAmelCase_ = new_model(lowerCAmelCase , noise=lowerCAmelCase )
self.assert_outputs_same(lowerCAmelCase , lowerCAmelCase )
@unittest.skip(
reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load\n to get deterministic results." )
def __A ( self : Any ):
'''simple docstring'''
pass
@unittest.skip(reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load" )
def __A ( self : Dict ):
'''simple docstring'''
pass
@slow
def __A ( self : Optional[Any] ):
'''simple docstring'''
UpperCAmelCase_ = TFViTMAEModel.from_pretrained("google/vit-base-patch16-224" )
self.assertIsNotNone(lowerCAmelCase )
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_tf
@require_vision
class TFViTMAEModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return ViTImageProcessor.from_pretrained("facebook/vit-mae-base") if is_vision_available() else None
    @slow
    def test_inference_for_pretraining(self):
        # make random mask reproducible across the PT and TF model
        np.random.seed(2)

        model = TFViTMAEForPreTraining.from_pretrained("facebook/vit-mae-base")

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="tf")

        # prepare a noise vector that will be also used for testing the TF model
        # (this way we can ensure that the PT and TF models operate on the same inputs)
        vit_mae_config = ViTMAEConfig()
        num_patches = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2)
        noise = np.random.uniform(size=(1, num_patches))

        # forward pass
        outputs = model(**inputs, noise=noise)

        # verify the logits
        expected_shape = tf.convert_to_tensor([1, 196, 768])
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = tf.convert_to_tensor(
            [[-0.0548, -1.7023, -0.9325], [0.3721, -0.5670, -0.2233], [0.8235, -1.3878, -0.3524]]
        )

        tf.debugging.assert_near(outputs.logits[0, :3, :3], expected_slice, atol=1e-4)
| 268
| 0
|
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__A = logging.get_logger(__name__)
__A = {
"Salesforce/blip-vqa-base": "https://huggingface.co/Salesforce/blip-vqa-base/resolve/main/config.json",
"Salesforce/blip-vqa-capfit-large": (
"https://huggingface.co/Salesforce/blip-vqa-base-capfit/resolve/main/config.json"
),
"Salesforce/blip-image-captioning-base": (
"https://huggingface.co/Salesforce/blip-image-captioning-base/resolve/main/config.json"
),
"Salesforce/blip-image-captioning-large": (
"https://huggingface.co/Salesforce/blip-image-captioning-large/resolve/main/config.json"
),
"Salesforce/blip-itm-base-coco": "https://huggingface.co/Salesforce/blip-itm-base-coco/resolve/main/config.json",
"Salesforce/blip-itm-large-coco": "https://huggingface.co/Salesforce/blip-itm-large-coco/resolve/main/config.json",
"Salesforce/blip-itm-base-flikr": "https://huggingface.co/Salesforce/blip-itm-base-flikr/resolve/main/config.json",
"Salesforce/blip-itm-large-flikr": (
"https://huggingface.co/Salesforce/blip-itm-large-flikr/resolve/main/config.json"
),
}
class BlipTextConfig(PretrainedConfig):
    model_type = "blip_text_model"

    def __init__(self, vocab_size=30_524, hidden_size=768, encoder_hidden_size=768, intermediate_size=3_072, projection_dim=768, num_hidden_layers=12, num_attention_heads=8, max_position_embeddings=512, hidden_act="gelu", layer_norm_eps=1e-12, hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, initializer_range=0.02, bos_token_id=30_522, eos_token_id=2, pad_token_id=0, sep_token_id=102, is_decoder=True, use_cache=True, **kwargs):
        super().__init__(
            pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, sep_token_id=sep_token_id, **kwargs,
        )

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.encoder_hidden_size = encoder_hidden_size
        self.intermediate_size = intermediate_size
        self.projection_dim = projection_dim
        self.hidden_dropout_prob = hidden_dropout_prob
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.max_position_embeddings = max_position_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.is_decoder = is_decoder
        self.use_cache = use_cache

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the text config dict if we are loading from BlipConfig
        if config_dict.get("model_type") == "blip":
            config_dict = config_dict["text_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """
                f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""")

        return cls.from_dict(config_dict, **kwargs)
class BlipVisionConfig(PretrainedConfig):
    model_type = "blip_vision_model"

    def __init__(self, hidden_size=768, intermediate_size=3_072, projection_dim=512, num_hidden_layers=12, num_attention_heads=12, image_size=384, patch_size=16, hidden_act="gelu", layer_norm_eps=1e-5, attention_dropout=0.0, initializer_range=1e-10, **kwargs):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.projection_dim = projection_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from BlipConfig
        if config_dict.get("model_type") == "blip":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """
                f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""")

        return cls.from_dict(config_dict, **kwargs)
class BlipConfig(PretrainedConfig):
    model_type = "blip"
    is_composition = True

    def __init__(self, text_config=None, vision_config=None, projection_dim=512, logit_scale_init_value=2.6592, image_text_hidden_size=256, **kwargs):
        super().__init__(**kwargs)

        if text_config is None:
            text_config = {}
            logger.info("`text_config` is `None`. Initializing the `BlipTextConfig` with default values.")

        if vision_config is None:
            vision_config = {}
            logger.info("`vision_config` is `None`. Initializing the `BlipVisionConfig` with default values.")

        self.text_config = BlipTextConfig(**text_config)
        self.vision_config = BlipVisionConfig(**vision_config)

        self.text_config.encoder_hidden_size = self.vision_config.hidden_size
        self.projection_dim = projection_dim
        self.logit_scale_init_value = logit_scale_init_value
        self.initializer_factor = 1.0
        self.initializer_range = 0.02
        self.image_text_hidden_size = image_text_hidden_size

    @classmethod
    def from_text_vision_configs(cls, text_config: BlipTextConfig, vision_config: BlipVisionConfig, **kwargs):
        return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["text_config"] = self.text_config.to_dict()
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
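# Illustrative usage (not part of the original module), guarded so it only runs
# when this file is executed directly:
if __name__ == "__main__":
    text_config = BlipTextConfig()
    vision_config = BlipVisionConfig()
    config = BlipConfig.from_text_vision_configs(text_config, vision_config)
    # the text encoder's cross-attention width is tied to the vision hidden size
    assert config.text_config.encoder_hidden_size == config.vision_config.hidden_size
    assert config.to_dict()["model_type"] == "blip"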
| 59
|
from __future__ import annotations
from decimal import Decimal
from math import * # noqa: F403
from sympy import diff
def newton_raphson(func: str, a: float | Decimal, precision: float = 10**-10) -> float:
    """Finds a root of `func` (given as a string in the variable x) starting from the guess `a`."""
    x = a
    while True:
        x = Decimal(x) - (
            Decimal(eval(func)) / Decimal(eval(str(diff(func))))  # noqa: S307
        )
        # This number dictates the accuracy of the answer
        if abs(eval(func)) < precision:  # noqa: S307
            return float(x)
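# Newton-Raphson iterates x_{n+1} = x_n - f(x_n) / f'(x_n): each step replaces f
# by its tangent line at x_n and jumps to that line's root, which converges
# quadratically near a simple root. `diff(func)` supplies f' symbolically.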
# Let's Execute
if __name__ == "__main__":
# Find root of trigonometric function
# Find value of pi
print(f'The root of sin(x) = 0 is {newton_raphson("sin(x)", 2)}')
# Find root of polynomial
print(f'The root of x**2 - 5*x + 2 = 0 is {newton_raphson("x**2 - 5*x + 2", 0.4)}')
# Find root of log(x) - 1 = 0, i.e. the value of e
print(f'The root of log(x) - 1 = 0 is {newton_raphson("log(x) - 1", 2)}')
# Find root of exp(x) - 1 = 0
print(f'The root of exp(x) - 1 = 0 is {newton_raphson("exp(x) - 1", 0)}')
| 59
| 1
|
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_nllb import NllbTokenizer
else:
    NllbTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {'vocab_file': 'sentencepiece.bpe.model', 'tokenizer_file': 'tokenizer.json'}

PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'facebook/nllb-200-distilled-600M': (
'https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/sentencepiece.bpe.model'
),
},
'tokenizer_file': {
'facebook/nllb-200-distilled-600M': (
'https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/tokenizer.json'
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'facebook/nllb-large-en-ro': 1_024,
'facebook/nllb-200-distilled-600M': 1_024,
}
# fmt: off
FAIRSEQ_LANGUAGE_CODES = ['ace_Arab', 'ace_Latn', 'acm_Arab', 'acq_Arab', 'aeb_Arab', 'afr_Latn', 'ajp_Arab', 'aka_Latn', 'amh_Ethi', 'apc_Arab', 'arb_Arab', 'ars_Arab', 'ary_Arab', 'arz_Arab', 'asm_Beng', 'ast_Latn', 'awa_Deva', 'ayr_Latn', 'azb_Arab', 'azj_Latn', 'bak_Cyrl', 'bam_Latn', 'ban_Latn', 'bel_Cyrl', 'bem_Latn', 'ben_Beng', 'bho_Deva', 'bjn_Arab', 'bjn_Latn', 'bod_Tibt', 'bos_Latn', 'bug_Latn', 'bul_Cyrl', 'cat_Latn', 'ceb_Latn', 'ces_Latn', 'cjk_Latn', 'ckb_Arab', 'crh_Latn', 'cym_Latn', 'dan_Latn', 'deu_Latn', 'dik_Latn', 'dyu_Latn', 'dzo_Tibt', 'ell_Grek', 'eng_Latn', 'epo_Latn', 'est_Latn', 'eus_Latn', 'ewe_Latn', 'fao_Latn', 'pes_Arab', 'fij_Latn', 'fin_Latn', 'fon_Latn', 'fra_Latn', 'fur_Latn', 'fuv_Latn', 'gla_Latn', 'gle_Latn', 'glg_Latn', 'grn_Latn', 'guj_Gujr', 'hat_Latn', 'hau_Latn', 'heb_Hebr', 'hin_Deva', 'hne_Deva', 'hrv_Latn', 'hun_Latn', 'hye_Armn', 'ibo_Latn', 'ilo_Latn', 'ind_Latn', 'isl_Latn', 'ita_Latn', 'jav_Latn', 'jpn_Jpan', 'kab_Latn', 'kac_Latn', 'kam_Latn', 'kan_Knda', 'kas_Arab', 'kas_Deva', 'kat_Geor', 'knc_Arab', 'knc_Latn', 'kaz_Cyrl', 'kbp_Latn', 'kea_Latn', 'khm_Khmr', 'kik_Latn', 'kin_Latn', 'kir_Cyrl', 'kmb_Latn', 'kon_Latn', 'kor_Hang', 'kmr_Latn', 'lao_Laoo', 'lvs_Latn', 'lij_Latn', 'lim_Latn', 'lin_Latn', 'lit_Latn', 'lmo_Latn', 'ltg_Latn', 'ltz_Latn', 'lua_Latn', 'lug_Latn', 'luo_Latn', 'lus_Latn', 'mag_Deva', 'mai_Deva', 'mal_Mlym', 'mar_Deva', 'min_Latn', 'mkd_Cyrl', 'plt_Latn', 'mlt_Latn', 'mni_Beng', 'khk_Cyrl', 'mos_Latn', 'mri_Latn', 'zsm_Latn', 'mya_Mymr', 'nld_Latn', 'nno_Latn', 'nob_Latn', 'npi_Deva', 'nso_Latn', 'nus_Latn', 'nya_Latn', 'oci_Latn', 'gaz_Latn', 'ory_Orya', 'pag_Latn', 'pan_Guru', 'pap_Latn', 'pol_Latn', 'por_Latn', 'prs_Arab', 'pbt_Arab', 'quy_Latn', 'ron_Latn', 'run_Latn', 'rus_Cyrl', 'sag_Latn', 'san_Deva', 'sat_Beng', 'scn_Latn', 'shn_Mymr', 'sin_Sinh', 'slk_Latn', 'slv_Latn', 'smo_Latn', 'sna_Latn', 'snd_Arab', 'som_Latn', 'sot_Latn', 'spa_Latn', 'als_Latn', 'srd_Latn', 'srp_Cyrl', 'ssw_Latn', 'sun_Latn', 'swe_Latn', 'swh_Latn', 'szl_Latn', 'tam_Taml', 'tat_Cyrl', 'tel_Telu', 'tgk_Cyrl', 'tgl_Latn', 'tha_Thai', 'tir_Ethi', 'taq_Latn', 'taq_Tfng', 'tpi_Latn', 'tsn_Latn', 'tso_Latn', 'tuk_Latn', 'tum_Latn', 'tur_Latn', 'twi_Latn', 'tzm_Tfng', 'uig_Arab', 'ukr_Cyrl', 'umb_Latn', 'urd_Arab', 'uzn_Latn', 'vec_Latn', 'vie_Latn', 'war_Latn', 'wol_Latn', 'xho_Latn', 'ydd_Hebr', 'yor_Latn', 'yue_Hant', 'zho_Hans', 'zho_Hant', 'zul_Latn']
class NllbTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = NllbTokenizer

    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []

    def __init__(self, vocab_file=None, tokenizer_file=None, bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", src_lang=None, tgt_lang=None, additional_special_tokens=None, legacy_behaviour=False, **kwargs):
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        self.legacy_behaviour = legacy_behaviour
        super().__init__(vocab_file=vocab_file, tokenizer_file=tokenizer_file, bos_token=bos_token, eos_token=eos_token, sep_token=sep_token, cls_token=cls_token, unk_token=unk_token, pad_token=pad_token, mask_token=mask_token, src_lang=src_lang, tgt_lang=tgt_lang, additional_special_tokens=additional_special_tokens, legacy_behaviour=legacy_behaviour, **kwargs)

        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

        _additional_special_tokens = FAIRSEQ_LANGUAGE_CODES.copy()

        if additional_special_tokens is not None:
            # Only add those special tokens if they are not already there.
            _additional_special_tokens.extend(
                [t for t in additional_special_tokens if t not in _additional_special_tokens]
            )

        self.add_special_tokens({'additional_special_tokens': _additional_special_tokens})
        self.lang_code_to_id = {
            lang_code: self.convert_tokens_to_ids(lang_code) for lang_code in FAIRSEQ_LANGUAGE_CODES
        }

        self._src_lang = src_lang if src_lang is not None else 'eng_Latn'
        self.cur_lang_code = self.convert_tokens_to_ids(self._src_lang)
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self._src_lang)
    @property
    def src_lang(self) -> str:
        return self._src_lang

    @src_lang.setter
    def src_lang(self, new_src_lang: str) -> None:
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang)

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def _build_translation_inputs(self, raw_inputs, return_tensors, src_lang, tgt_lang, **extra_kwargs):
        if src_lang is None or tgt_lang is None:
            raise ValueError('Translation requires a `src_lang` and a `tgt_lang` for this model')
        self.src_lang = src_lang
        inputs = self(raw_inputs, add_special_tokens=True, return_tensors=return_tensors, **extra_kwargs)
        tgt_lang_id = self.convert_tokens_to_ids(tgt_lang)
        inputs['forced_bos_token_id'] = tgt_lang_id
        return inputs

    def prepare_seq2seq_batch(self, src_texts: List[str], src_lang: str = 'eng_Latn', tgt_texts: Optional[List[str]] = None, tgt_lang: str = 'fra_Latn', **kwargs) -> BatchEncoding:
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs)
    def _switch_to_input_mode(self):
        return self.set_src_lang_special_tokens(self.src_lang)

    def _switch_to_target_mode(self):
        return self.set_tgt_lang_special_tokens(self.tgt_lang)

    def set_src_lang_special_tokens(self, src_lang) -> None:
        self.cur_lang_code = self.convert_tokens_to_ids(src_lang)

        if self.legacy_behaviour:
            self.prefix_tokens = []
            self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        else:
            self.prefix_tokens = [self.cur_lang_code]
            self.suffix_tokens = [self.eos_token_id]

        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens)
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens)

        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ['$A'] + suffix_tokens_str, pair=prefix_tokens_str + ['$A', '$B'] + suffix_tokens_str, special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens)),
        )

    def set_tgt_lang_special_tokens(self, lang: str) -> None:
        self.cur_lang_code = self.convert_tokens_to_ids(lang)
        if self.legacy_behaviour:
            self.prefix_tokens = []
            self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        else:
            self.prefix_tokens = [self.cur_lang_code]
            self.suffix_tokens = [self.eos_token_id]

        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens)
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens)

        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ['$A'] + suffix_tokens_str, pair=prefix_tokens_str + ['$A', '$B'] + suffix_tokens_str, special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens)),
        )

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '
                'tokenizer.')

        if not os.path.isdir(save_directory):
            logger.error(f"""Vocabulary path ({save_directory}) should be a directory.""")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
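    # Illustrative usage (assumes network access to the Hugging Face Hub):
    #
    #     tokenizer = NllbTokenizerFast.from_pretrained(
    #         "facebook/nllb-200-distilled-600M", src_lang="eng_Latn", tgt_lang="fra_Latn"
    #     )
    #     inputs = tokenizer("Hello world", return_tensors="pt")
    #     # with legacy_behaviour=False, inputs are wrapped as [src_lang_code] ... [eos]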
| 433
|
import numpy as np
import torch
from torch.nn import CrossEntropyLoss
from transformers import AutoModelForCausalLM, AutoTokenizer
import datasets
from datasets import logging
_CITATION = '\\n\n'
_DESCRIPTION = '\nPerplexity (PPL) is one of the most common metrics for evaluating language models.\nIt is defined as the exponentiated average negative log-likelihood of a sequence.\n\nFor more information, see https://huggingface.co/docs/transformers/perplexity\n'
_KWARGS_DESCRIPTION = '\nArgs:\n    model_id (str): model used for calculating Perplexity\n        NOTE: Perplexity can only be calculated for causal language models.\n        This includes models such as gpt2, causal variations of bert,\n        causal versions of t5, and more (the full list can be found\n        in the AutoModelForCausalLM documentation here:\n        https://huggingface.co/docs/transformers/master/en/model_doc/auto#transformers.AutoModelForCausalLM )\n\n    input_texts (list of str): input text, each separate text snippet\n        is one list entry.\n    batch_size (int): the batch size to run texts through the model. Defaults to 16.\n    add_start_token (bool): whether to add the start token to the texts,\n        so the perplexity can include the probability of the first word. Defaults to True.\n    device (str): device to run on, defaults to \'cuda\' when available\nReturns:\n    perplexity: dictionary containing the perplexity scores for the texts\n        in the input list, as well as the mean perplexity. If one of the input texts is\n        longer than the max input length of the model, then it is truncated to the\n        max length for the perplexity computation.\nExamples:\n    Example 1:\n        >>> perplexity = datasets.load_metric("perplexity")\n        >>> input_texts = ["lorem ipsum", "Happy Birthday!", "Bienvenue"]\n        >>> results = perplexity.compute(model_id=\'gpt2\',\n        ...                              add_start_token=False,\n        ...                              input_texts=input_texts) # doctest:+ELLIPSIS\n        >>> print(list(results.keys()))\n        [\'perplexities\', \'mean_perplexity\']\n        >>> print(round(results["mean_perplexity"], 2))\n        78.22\n        >>> print(round(results["perplexities"][0], 2))\n        11.11\n\n    Example 2:\n        >>> perplexity = datasets.load_metric("perplexity")\n        >>> input_texts = datasets.load_dataset("wikitext",\n        ...                                     "wikitext-2-raw-v1",\n        ...                                     split="test")["text"][:50] # doctest:+ELLIPSIS\n        [...]\n        >>> input_texts = [s for s in input_texts if s!=\'\']\n        >>> results = perplexity.compute(model_id=\'gpt2\',\n        ...                              input_texts=input_texts) # doctest:+ELLIPSIS\n        >>> print(list(results.keys()))\n        [\'perplexities\', \'mean_perplexity\']\n        >>> print(round(results["mean_perplexity"], 2))\n        60.35\n        >>> print(round(results["perplexities"][0], 2))\n        81.12\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION)
class Perplexity(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    'input_texts': datasets.Value('string'),
                }
            ),
            reference_urls=['https://huggingface.co/docs/transformers/perplexity'],
        )
    def _compute(self, input_texts, model_id, batch_size: int = 16, add_start_token: bool = True, device=None):
        if device is not None:
            assert device in ["gpu", "cpu", "cuda"], "device should be either gpu or cpu."
            if device == "gpu":
                device = "cuda"
        else:
            device = "cuda" if torch.cuda.is_available() else "cpu"

        model = AutoModelForCausalLM.from_pretrained(model_id)
        model = model.to(device)

        tokenizer = AutoTokenizer.from_pretrained(model_id)

        # if batch_size > 1 (which generally leads to padding being required), and
        # if there is not an already assigned pad_token, assign an existing
        # special token to also be the padding token
        if tokenizer.pad_token is None and batch_size > 1:
            existing_special_tokens = list(tokenizer.special_tokens_map_extended.values())
            # check that the model already has at least one special token defined
            assert (
                len(existing_special_tokens) > 0
            ), "If batch_size > 1, model must have at least one special token to use for padding. Please use a different model or set batch_size=1."
            # assign one of the special tokens to also be the pad token
            tokenizer.add_special_tokens({"pad_token": existing_special_tokens[0]})

        if add_start_token:
            # leave room for <BOS> token to be added:
            assert (
                tokenizer.bos_token is not None
            ), "Input model must already have a BOS token if using add_start_token=True. Please use a different model, or set add_start_token=False"
            max_tokenized_len = model.config.max_length - 1
        else:
            max_tokenized_len = model.config.max_length

        encodings = tokenizer(
            input_texts, add_special_tokens=False, padding=True, truncation=True, max_length=max_tokenized_len, return_tensors="pt", return_attention_mask=True,
        ).to(device)

        encoded_texts = encodings["input_ids"]
        attn_masks = encodings["attention_mask"]

        # check that each input is long enough:
        if add_start_token:
            assert torch.all(torch.ge(attn_masks.sum(1), 1)), "Each input text must be at least one token long."
        else:
            assert torch.all(
                torch.ge(attn_masks.sum(1), 2)
            ), "When add_start_token=False, each input text must be at least two tokens long. Run with add_start_token=True if inputting strings of only one token, and remove all empty input strings."

        ppls = []
        loss_fct = CrossEntropyLoss(reduction="none")

        for start_index in logging.tqdm(range(0, len(encoded_texts), batch_size)):
            end_index = min(start_index + batch_size, len(encoded_texts))
            encoded_batch = encoded_texts[start_index:end_index]
            attn_mask = attn_masks[start_index:end_index]

            if add_start_token:
                bos_tokens_tensor = torch.tensor([[tokenizer.bos_token_id]] * encoded_batch.size(dim=0)).to(device)
                encoded_batch = torch.cat([bos_tokens_tensor, encoded_batch], dim=1)
                attn_mask = torch.cat(
                    [torch.ones(bos_tokens_tensor.size(), dtype=torch.int64).to(device), attn_mask], dim=1)

            labels = encoded_batch

            with torch.no_grad():
                out_logits = model(encoded_batch, attention_mask=attn_mask).logits

            shift_logits = out_logits[..., :-1, :].contiguous()
            shift_labels = labels[..., 1:].contiguous()
            shift_attention_mask_batch = attn_mask[..., 1:].contiguous()

            perplexity_batch = torch.exp2(
                (loss_fct(shift_logits.transpose(1, 2), shift_labels) * shift_attention_mask_batch).sum(1)
                / shift_attention_mask_batch.sum(1))

            ppls += perplexity_batch.tolist()

        return {"perplexities": ppls, "mean_perplexity": np.mean(ppls)}
| 433
| 1
|
"""simple docstring"""
from math import sqrt
def sum_of_divisors(n: int) -> int:
    total = 0
    for i in range(1, int(sqrt(n) + 1)):
        if n % i == 0 and i != sqrt(n):
            total += i + n // i
        elif i == sqrt(n):
            total += i
    return total - n


def solution(n: int = 10_000) -> int:
    total = sum(
        i
        for i in range(1, n)
        if sum_of_divisors(sum_of_divisors(i)) == i and sum_of_divisors(i) != i)
    return total
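# Worked example: d(220) = 1+2+4+5+10+11+20+22+44+55+110 = 284 and d(284) = 220,
# so sum_of_divisors(sum_of_divisors(220)) == 220 while sum_of_divisors(220) != 220;
# both members of the amicable pair (220, 284) are therefore added to the total.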
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
| 237
|
"""simple docstring"""
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class DEISMultistepSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DEISMultistepScheduler,)
    forward_default_kwargs = (("num_inference_steps", 25),)

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "solver_order": 2,
        }

        config.update(**kwargs)
        return config
def snake_case_ ( self , a_=0 , **a_ ):
a_ : Union[str, Any] = dict(self.forward_default_kwargs )
a_ : Union[str, Any] = kwargs.pop("num_inference_steps" , a_ )
a_ : List[str] = self.dummy_sample
a_ : Union[str, Any] = 0.1 * sample
a_ : List[Any] = [residual + 0.2, residual + 0.15, residual + 0.10]
for scheduler_class in self.scheduler_classes:
a_ : Any = self.get_scheduler_config(**a_ )
a_ : Dict = scheduler_class(**a_ )
scheduler.set_timesteps(a_ )
# copy over dummy past residuals
a_ : Optional[Any] = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(a_ )
a_ : Dict = scheduler_class.from_pretrained(a_ )
new_scheduler.set_timesteps(a_ )
# copy over dummy past residuals
a_ : List[Any] = dummy_past_residuals[: new_scheduler.config.solver_order]
a_ , a_ : List[str] = sample, sample
for t in range(a_ , time_step + scheduler.config.solver_order + 1 ):
a_ : Dict = scheduler.step(a_ , a_ , a_ , **a_ ).prev_sample
a_ : List[str] = new_scheduler.step(a_ , a_ , a_ , **a_ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
def snake_case_ ( self ):
pass
def snake_case_ ( self , a_=0 , **a_ ):
a_ : List[str] = dict(self.forward_default_kwargs )
a_ : Dict = kwargs.pop("num_inference_steps" , a_ )
a_ : List[str] = self.dummy_sample
a_ : str = 0.1 * sample
a_ : Tuple = [residual + 0.2, residual + 0.15, residual + 0.10]
for scheduler_class in self.scheduler_classes:
a_ : Union[str, Any] = self.get_scheduler_config()
a_ : Optional[Any] = scheduler_class(**a_ )
scheduler.set_timesteps(a_ )
# copy over dummy past residuals (must be after setting timesteps)
a_ : Optional[int] = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(a_ )
a_ : List[Any] = scheduler_class.from_pretrained(a_ )
# copy over dummy past residuals
new_scheduler.set_timesteps(a_ )
# copy over dummy past residual (must be after setting timesteps)
a_ : Dict = dummy_past_residuals[: new_scheduler.config.solver_order]
a_ : Optional[int] = scheduler.step(a_ , a_ , a_ , **a_ ).prev_sample
a_ : Tuple = new_scheduler.step(a_ , a_ , a_ , **a_ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
    def full_loop(self, scheduler=None, **config):
        if scheduler is None:
            scheduler_class = self.scheduler_classes[0]
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        return sample
def snake_case_ ( self ):
a_ : Union[str, Any] = dict(self.forward_default_kwargs )
a_ : Tuple = kwargs.pop("num_inference_steps" , a_ )
for scheduler_class in self.scheduler_classes:
a_ : List[Any] = self.get_scheduler_config()
a_ : str = scheduler_class(**a_ )
a_ : Any = self.dummy_sample
a_ : int = 0.1 * sample
if num_inference_steps is not None and hasattr(a_ , "set_timesteps" ):
scheduler.set_timesteps(a_ )
elif num_inference_steps is not None and not hasattr(a_ , "set_timesteps" ):
a_ : Union[str, Any] = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
a_ : Optional[int] = [residual + 0.2, residual + 0.15, residual + 0.10]
a_ : str = dummy_past_residuals[: scheduler.config.solver_order]
a_ : str = scheduler.timesteps[5]
a_ : Dict = scheduler.timesteps[6]
a_ : Dict = scheduler.step(a_ , a_ , a_ , **a_ ).prev_sample
a_ : Any = scheduler.step(a_ , a_ , a_ , **a_ ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
def snake_case_ ( self ):
# make sure that iterating over schedulers with same config names gives same results
# for defaults
a_ : Dict = DEISMultistepScheduler(**self.get_scheduler_config() )
a_ : List[str] = self.full_loop(scheduler=a_ )
a_ : Tuple = torch.mean(torch.abs(a_ ) )
assert abs(result_mean.item() - 0.23_916 ) < 1e-3
a_ : int = DPMSolverSinglestepScheduler.from_config(scheduler.config )
a_ : Tuple = DPMSolverMultistepScheduler.from_config(scheduler.config )
a_ : Tuple = UniPCMultistepScheduler.from_config(scheduler.config )
a_ : Any = DEISMultistepScheduler.from_config(scheduler.config )
a_ : str = self.full_loop(scheduler=a_ )
a_ : Any = torch.mean(torch.abs(a_ ) )
assert abs(result_mean.item() - 0.23_916 ) < 1e-3
def snake_case_ ( self ):
for timesteps in [2_5, 5_0, 1_0_0, 9_9_9, 1_0_0_0]:
self.check_over_configs(num_train_timesteps=a_ )
def snake_case_ ( self ):
self.check_over_configs(thresholding=a_ )
for order in [1, 2, 3]:
for solver_type in ["logrho"]:
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
thresholding=a_ , prediction_type=a_ , sample_max_value=a_ , algorithm_type="deis" , solver_order=a_ , solver_type=a_ , )
def snake_case_ ( self ):
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=a_ )
def snake_case_ ( self ):
for algorithm_type in ["deis"]:
for solver_type in ["logrho"]:
for order in [1, 2, 3]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
solver_order=a_ , solver_type=a_ , prediction_type=a_ , algorithm_type=a_ , )
a_ : List[Any] = self.full_loop(
solver_order=a_ , solver_type=a_ , prediction_type=a_ , algorithm_type=a_ , )
assert not torch.isnan(a_ ).any(), "Samples have nan numbers"
def snake_case_ ( self ):
self.check_over_configs(lower_order_final=a_ )
self.check_over_configs(lower_order_final=a_ )
def snake_case_ ( self ):
for num_inference_steps in [1, 2, 3, 5, 1_0, 5_0, 1_0_0, 9_9_9, 1_0_0_0]:
self.check_over_forward(num_inference_steps=a_ , time_step=0 )
def snake_case_ ( self ):
a_ : str = self.full_loop()
a_ : Dict = torch.mean(torch.abs(a_ ) )
assert abs(result_mean.item() - 0.23_916 ) < 1e-3
def snake_case_ ( self ):
a_ : Optional[Any] = self.full_loop(prediction_type="v_prediction" )
a_ : Union[str, Any] = torch.mean(torch.abs(a_ ) )
assert abs(result_mean.item() - 0.091 ) < 1e-3
def snake_case_ ( self ):
a_ : List[str] = self.scheduler_classes[0]
a_ : str = self.get_scheduler_config(thresholding=a_ , dynamic_thresholding_ratio=0 )
a_ : Dict = scheduler_class(**a_ )
a_ : int = 1_0
a_ : List[str] = self.dummy_model()
a_ : Optional[int] = self.dummy_sample_deter.half()
scheduler.set_timesteps(a_ )
for i, t in enumerate(scheduler.timesteps ):
a_ : Optional[int] = model(a_ , a_ )
a_ : Optional[Any] = scheduler.step(a_ , a_ , a_ ).prev_sample
assert sample.dtype == torch.floataa
| 237
| 1
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
    'configuration_gpt_bigcode': ['GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP', 'GPTBigCodeConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_gpt_bigcode'] = [
        'GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST',
        'GPTBigCodeForSequenceClassification',
        'GPTBigCodeForTokenClassification',
        'GPTBigCodeForCausalLM',
        'GPTBigCodeModel',
        'GPTBigCodePreTrainedModel',
    ]
if TYPE_CHECKING:
from .configuration_gpt_bigcode import GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTBigCodeConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_bigcode import (
GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTBigCodeForCausalLM,
GPTBigCodeForSequenceClassification,
GPTBigCodeForTokenClassification,
GPTBigCodeModel,
GPTBigCodePreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
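# Lazy-loading note (illustrative): importing a symbol such as
# `from transformers.models.gpt_bigcode import GPTBigCodeModel` resolves through
# `_LazyModule`, so the heavy torch-backed submodule is only imported on first access.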
| 707
|
"""simple docstring"""
import json
import os
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from requests.exceptions import HTTPError
from transformers.utils import (
CONFIG_NAME,
FLAX_WEIGHTS_NAME,
TF2_WEIGHTS_NAME,
TRANSFORMERS_CACHE,
WEIGHTS_NAME,
cached_file,
get_file_from_repo,
has_file,
)
RANDOM_BERT = 'hf-internal-testing/tiny-random-bert'
CACHE_DIR = os.path.join(TRANSFORMERS_CACHE, 'models--hf-internal-testing--tiny-random-bert')
FULL_COMMIT_HASH = '9b8c223d42b2188cb49d29af482996f9d0f3e5a6'
# Reconstructed constants for these cache tests (assumes `os`, `json`, `tempfile`, `mock`,
# `Path`, `HTTPError`, `TRANSFORMERS_CACHE`, the file-name constants (`CONFIG_NAME`,
# `WEIGHTS_NAME`, `TF2_WEIGHTS_NAME`, `FLAX_WEIGHTS_NAME`) and the `cached_file` /
# `get_file_from_repo` / `has_file` helpers are imported at the top of this file).
RANDOM_BERT = "hf-internal-testing/tiny-random-bert"
CACHE_DIR = os.path.join(TRANSFORMERS_CACHE, "models--hf-internal-testing--tiny-random-bert")
FULL_COMMIT_HASH = "9b8c223d42b2188cb49d29af482996f9d0f3e5a6"


class GetFromCacheTests(unittest.TestCase):
    def test_cached_file(self):
        archive_file = cached_file(RANDOM_BERT, CONFIG_NAME)
        # Should have downloaded the file in here
        self.assertTrue(os.path.isdir(CACHE_DIR))
        # Cache should contain at least those three subfolders:
        for subfolder in ["blobs", "refs", "snapshots"]:
            self.assertTrue(os.path.isdir(os.path.join(CACHE_DIR, subfolder)))
        with open(os.path.join(CACHE_DIR, "refs", "main")) as f:
            main_commit = f.read()
        self.assertEqual(archive_file, os.path.join(CACHE_DIR, "snapshots", main_commit, CONFIG_NAME))
        self.assertTrue(os.path.isfile(archive_file))

        # File is cached at the same place the second time.
        new_archive_file = cached_file(RANDOM_BERT, CONFIG_NAME)
        self.assertEqual(archive_file, new_archive_file)

        # Using a specific revision to test the full commit hash.
        archive_file = cached_file(RANDOM_BERT, CONFIG_NAME, revision="9b8c223")
        self.assertEqual(archive_file, os.path.join(CACHE_DIR, "snapshots", FULL_COMMIT_HASH, CONFIG_NAME))

    def test_cached_file_errors(self):
        with self.assertRaisesRegex(EnvironmentError, "is not a valid model identifier"):
            _ = cached_file("tiny-random-bert", CONFIG_NAME)

        with self.assertRaisesRegex(EnvironmentError, "is not a valid git identifier"):
            _ = cached_file(RANDOM_BERT, CONFIG_NAME, revision="aaaa")

        with self.assertRaisesRegex(EnvironmentError, "does not appear to have a file named"):
            _ = cached_file(RANDOM_BERT, "conf")

    def test_non_existence_is_cached(self):
        with self.assertRaisesRegex(EnvironmentError, "does not appear to have a file named"):
            _ = cached_file(RANDOM_BERT, "conf")

        with open(os.path.join(CACHE_DIR, "refs", "main")) as f:
            main_commit = f.read()
        self.assertTrue(os.path.isfile(os.path.join(CACHE_DIR, ".no_exist", main_commit, "conf")))

        path = cached_file(RANDOM_BERT, "conf", _raise_exceptions_for_missing_entries=False)
        self.assertIsNone(path)

        path = cached_file(RANDOM_BERT, "conf", local_files_only=True, _raise_exceptions_for_missing_entries=False)
        self.assertIsNone(path)

        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}

        # Under the mock environment we get a 500 error when trying to reach the tokenizer.
        with mock.patch("requests.Session.request", return_value=response_mock) as mock_head:
            path = cached_file(RANDOM_BERT, "conf", _raise_exceptions_for_connection_errors=False)
            self.assertIsNone(path)
            # This check we did call the fake head request
            mock_head.assert_called()

    def test_has_file(self):
        self.assertTrue(has_file("hf-internal-testing/tiny-bert-pt-only", WEIGHTS_NAME))
        self.assertFalse(has_file("hf-internal-testing/tiny-bert-pt-only", TF2_WEIGHTS_NAME))
        self.assertFalse(has_file("hf-internal-testing/tiny-bert-pt-only", FLAX_WEIGHTS_NAME))

    def test_get_file_from_repo_distant(self):
        # `get_file_from_repo` returns None if the file does not exist
        self.assertIsNone(get_file_from_repo("bert-base-cased", "ahah.txt"))

        # The function raises if the repository does not exist.
        with self.assertRaisesRegex(EnvironmentError, "is not a valid model identifier"):
            get_file_from_repo("bert-base-case", CONFIG_NAME)

        # The function raises if the revision does not exist.
        with self.assertRaisesRegex(EnvironmentError, "is not a valid git identifier"):
            get_file_from_repo("bert-base-cased", CONFIG_NAME, revision="ahaha")

        resolved_file = get_file_from_repo("bert-base-cased", CONFIG_NAME)
        # The name is the cached name which is not very easy to test, so instead we load the content.
        config = json.loads(open(resolved_file, "r").read())
        self.assertEqual(config["hidden_size"], 768)

    def test_get_file_from_repo_local(self):
        with tempfile.TemporaryDirectory() as tmp_dir:
            filename = Path(tmp_dir) / "a.txt"
            filename.touch()
            self.assertEqual(get_file_from_repo(tmp_dir, "a.txt"), str(filename))

            self.assertIsNone(get_file_from_repo(tmp_dir, "b.txt"))
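# Example usage of the helper under test (illustrative sketch; any public model repo
# with a config.json works):
#
#     from transformers.utils import cached_file
#     resolved = cached_file("bert-base-uncased", "config.json")
#     # `resolved` is a local path inside the HF cache,
#     # e.g. .../models--bert-base-uncased/snapshots/<commit>/config.json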
| 523
| 0
|
from typing import Any


def viterbi(
    observations_space: list,
    states_space: list,
    initial_probabilities: dict,
    transition_probabilities: dict,
    emission_probabilities: dict,
) -> list:
    """
    Viterbi algorithm: computes the most likely sequence of hidden states
    (the Viterbi path) for a sequence of observations in a hidden Markov model.
    """
    _validation(
        observations_space,
        states_space,
        initial_probabilities,
        transition_probabilities,
        emission_probabilities,
    )
    # Creates data structures and fill initial step
    probabilities: dict = {}
    pointers: dict = {}
    for state in states_space:
        observation = observations_space[0]
        probabilities[(state, observation)] = (
            initial_probabilities[state] * emission_probabilities[state][observation]
        )
        pointers[(state, observation)] = None

    # Fills the data structure with the probabilities of
    # different transitions and pointers to previous states
    for o in range(1, len(observations_space)):
        observation = observations_space[o]
        prior_observation = observations_space[o - 1]
        for state in states_space:
            # Calculates the argmax for probability function
            arg_max = ""
            max_probability = -1
            for k_state in states_space:
                probability = (
                    probabilities[(k_state, prior_observation)]
                    * transition_probabilities[k_state][state]
                    * emission_probabilities[state][observation]
                )
                if probability > max_probability:
                    max_probability = probability
                    arg_max = k_state

            # Update probabilities and pointers dicts
            probabilities[(state, observation)] = (
                probabilities[(arg_max, prior_observation)]
                * transition_probabilities[arg_max][state]
                * emission_probabilities[state][observation]
            )
            pointers[(state, observation)] = arg_max

    # The final observation
    final_observation = observations_space[len(observations_space) - 1]

    # argmax for given final observation
    arg_max = ""
    max_probability = -1
    for k_state in states_space:
        probability = probabilities[(k_state, final_observation)]
        if probability > max_probability:
            max_probability = probability
            arg_max = k_state
    last_state = arg_max

    # Process pointers backwards
    previous = last_state
    result = []
    for o in range(len(observations_space) - 1, -1, -1):
        result.append(previous)
        previous = pointers[previous, observations_space[o]]
    result.reverse()

    return result


def _validation(
    observations_space: Any,
    states_space: Any,
    initial_probabilities: Any,
    transition_probabilities: Any,
    emission_probabilities: Any,
) -> None:
    _validate_not_empty(
        observations_space,
        states_space,
        initial_probabilities,
        transition_probabilities,
        emission_probabilities,
    )
    _validate_lists(observations_space, states_space)
    _validate_dicts(initial_probabilities, transition_probabilities, emission_probabilities)


def _validate_not_empty(
    observations_space: Any,
    states_space: Any,
    initial_probabilities: Any,
    transition_probabilities: Any,
    emission_probabilities: Any,
) -> None:
    if not all(
        [
            observations_space,
            states_space,
            initial_probabilities,
            transition_probabilities,
            emission_probabilities,
        ]
    ):
        raise ValueError("There's an empty parameter")


def _validate_lists(observations_space: Any, states_space: Any) -> None:
    _validate_list(observations_space, "observations_space")
    _validate_list(states_space, "states_space")


def _validate_list(_object: Any, var_name: str) -> None:
    if not isinstance(_object, list):
        raise ValueError(f"{var_name} must be a list")
    else:
        for x in _object:
            if not isinstance(x, str):
                raise ValueError(f"{var_name} must be a list of strings")


def _validate_dicts(
    initial_probabilities: Any,
    transition_probabilities: Any,
    emission_probabilities: Any,
) -> None:
    _validate_dict(initial_probabilities, "initial_probabilities", float)
    _validate_nested_dict(transition_probabilities, "transition_probabilities")
    _validate_nested_dict(emission_probabilities, "emission_probabilities")


def _validate_nested_dict(_object: Any, var_name: str) -> None:
    _validate_dict(_object, var_name, dict)
    for x in _object.values():
        _validate_dict(x, var_name, float, True)


def _validate_dict(_object: Any, var_name: str, value_type: type, nested: bool = False) -> None:
    if not isinstance(_object, dict):
        raise ValueError(f"{var_name} must be a dict")
    if not all(isinstance(x, str) for x in _object):
        raise ValueError(f"{var_name} all keys must be strings")
    if not all(isinstance(x, value_type) for x in _object.values()):
        nested_text = "nested dictionary " if nested else ""
        raise ValueError(f"{var_name} {nested_text}all values must be {value_type.__name__}")
if __name__ == "__main__":
from doctest import testmod
testmod()
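# Example usage (illustrative sketch; the classic healthy/sick HMM, where the
# observation and state names below are made up for demonstration):
#
#     observations = ["normal", "cold", "dizzy"]
#     states = ["healthy", "sick"]
#     start_p = {"healthy": 0.6, "sick": 0.4}
#     trans_p = {
#         "healthy": {"healthy": 0.7, "sick": 0.3},
#         "sick": {"healthy": 0.4, "sick": 0.6},
#     }
#     emit_p = {
#         "healthy": {"normal": 0.5, "cold": 0.4, "dizzy": 0.1},
#         "sick": {"normal": 0.1, "cold": 0.3, "dizzy": 0.6},
#     }
#     viterbi(observations, states, start_p, trans_p, emit_p)
#     # -> ['healthy', 'healthy', 'sick']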
| 75
|
import numpy as np
# 5x5 Polybius square for the Bifid cipher ("j" is merged with "i").
SQUARE = [
    ["a", "b", "c", "d", "e"],
    ["f", "g", "h", "i", "k"],
    ["l", "m", "n", "o", "p"],
    ["q", "r", "s", "t", "u"],
    ["v", "w", "x", "y", "z"],
]


class BifidCipher:
    def __init__(self) -> None:
        self.SQUARE = np.array(SQUARE)

    def letter_to_numbers(self, letter: str) -> np.ndarray:
        """Return the pair of 1-indexed coordinates of `letter` in the Polybius square."""
        index1, index2 = np.where(letter == self.SQUARE)
        indexes = np.concatenate([index1 + 1, index2 + 1])
        return indexes

    def numbers_to_letter(self, index1: int, index2: int) -> str:
        """Return the letter at the given 1-indexed coordinates of the Polybius square."""
        letter = self.SQUARE[index1 - 1, index2 - 1]
        return letter

    def encode(self, message: str) -> str:
        """Encode `message` with the Bifid cipher (spaces removed, "j" mapped to "i")."""
        message = message.lower()
        message = message.replace(" ", "")
        message = message.replace("j", "i")

        # Write each letter's coordinates column-wise into a 2 x n grid...
        first_step = np.empty((2, len(message)))
        for letter_index in range(len(message)):
            numbers = self.letter_to_numbers(message[letter_index])
            first_step[0, letter_index] = numbers[0]
            first_step[1, letter_index] = numbers[1]

        # ...then read the grid row by row and regroup the digits into pairs.
        second_step = first_step.reshape(2 * len(message))
        encoded_message = ""
        for numbers_index in range(len(message)):
            index1 = int(second_step[numbers_index * 2])
            index2 = int(second_step[(numbers_index * 2) + 1])
            letter = self.numbers_to_letter(index1, index2)
            encoded_message = encoded_message + letter

        return encoded_message

    def decode(self, message: str) -> str:
        """Decode a Bifid-encoded `message` (inverse of `encode`)."""
        message = message.lower()
        message = message.replace(" ", "")

        # Lay all coordinate digits out flat (first halves, then second halves)...
        first_step = np.empty(2 * len(message))
        for letter_index in range(len(message)):
            numbers = self.letter_to_numbers(message[letter_index])
            first_step[letter_index] = numbers[0]
            first_step[letter_index + len(message)] = numbers[1]

        # ...then fold back into two rows and read column-wise.
        second_step = first_step.reshape((2, len(message)))
        decoded_message = ""
        for numbers_index in range(len(message)):
            index1 = int(second_step[0, numbers_index])
            index2 = int(second_step[1, numbers_index])
            letter = self.numbers_to_letter(index1, index2)
            decoded_message = decoded_message + letter

        return decoded_message
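# Example usage (illustrative sketch; any message without "j" round-trips exactly):
#
#     cipher = BifidCipher()
#     encoded = cipher.encode("testmessage")
#     assert cipher.decode(encoded) == "testmessage"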
| 258
| 0
|
import os
import sys
import transformers
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"  # reduce TensorFlow's console noise
print("Python version:", sys.version)
print("transformers version:", transformers.__version__)
try:
import torch
print("Torch version:", torch.__version__)
print("Cuda available:", torch.cuda.is_available())
print("Cuda version:", torch.version.cuda)
print("CuDNN version:", torch.backends.cudnn.version())
print("Number of GPUs available:", torch.cuda.device_count())
print("NCCL version:", torch.cuda.nccl.version())
except ImportError:
print("Torch version:", None)
try:
import deepspeed
print("DeepSpeed version:", deepspeed.__version__)
except ImportError:
print("DeepSpeed version:", None)
try:
import tensorflow as tf
print("TensorFlow version:", tf.__version__)
print("TF GPUs available:", bool(tf.config.list_physical_devices("GPU")))
print("Number of TF GPUs available:", len(tf.config.list_physical_devices("GPU")))
except ImportError:
print("TensorFlow version:", None)
| 700
|
"""simple docstring"""
import unittest
from huggingface_hub import hf_hub_download
from transformers import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING, VideoMAEFeatureExtractor
from transformers.pipelines import VideoClassificationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_decord,
require_tf,
require_torch,
require_torch_or_tf,
require_vision,
)
from .test_pipelines_common import ANY
@is_pipeline_test
@require_torch_or_tf
@require_vision
@require_decord
class VideoClassificationPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING

    def get_test_pipeline(self, model, tokenizer, processor):
        example_video_filepath = hf_hub_download(
            repo_id="nateraw/video-demo", filename="archery.mp4", repo_type="dataset"
        )
        video_classifier = VideoClassificationPipeline(model=model, image_processor=processor, top_k=2)
        examples = [
            example_video_filepath,
            "https://huggingface.co/datasets/nateraw/video-demo/resolve/main/archery.mp4",
        ]
        return video_classifier, examples

    def run_pipeline_test(self, video_classifier, examples):
        for example in examples:
            outputs = video_classifier(example)

            self.assertEqual(
                outputs,
                [
                    {"score": ANY(float), "label": ANY(str)},
                    {"score": ANY(float), "label": ANY(str)},
                ],
            )

    @require_torch
    def test_small_model_pt(self):
        small_model = "hf-internal-testing/tiny-random-VideoMAEForVideoClassification"
        small_feature_extractor = VideoMAEFeatureExtractor(
            size={"shortest_edge": 10}, crop_size={"height": 10, "width": 10}
        )
        video_classifier = pipeline(
            "video-classification", model=small_model, feature_extractor=small_feature_extractor, frame_sampling_rate=4
        )

        video_file_path = hf_hub_download(repo_id="nateraw/video-demo", filename="archery.mp4", repo_type="dataset")
        outputs = video_classifier(video_file_path, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [{"score": 0.5199, "label": "LABEL_0"}, {"score": 0.4801, "label": "LABEL_1"}],
        )

        outputs = video_classifier(
            [
                video_file_path,
                video_file_path,
            ],
            top_k=2,
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [{"score": 0.5199, "label": "LABEL_0"}, {"score": 0.4801, "label": "LABEL_1"}],
                [{"score": 0.5199, "label": "LABEL_0"}, {"score": 0.4801, "label": "LABEL_1"}],
            ],
        )

    @require_tf
    def test_small_model_tf(self):
        pass
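# Example usage of the pipeline under test (illustrative sketch; the checkpoint is
# the tiny random test model exercised above, so its labels are meaningless):
#
#     classifier = pipeline(
#         "video-classification",
#         model="hf-internal-testing/tiny-random-VideoMAEForVideoClassification",
#     )
#     preds = classifier("archery.mp4", top_k=2)  # -> list of {"score": ..., "label": ...}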
| 668
| 0
|
""" MarkupLM model configuration"""

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

MARKUPLM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/markuplm-base": "https://huggingface.co/microsoft/markuplm-base/resolve/main/config.json",
    "microsoft/markuplm-large": "https://huggingface.co/microsoft/markuplm-large/resolve/main/config.json",
}


class MarkupLMConfig(PretrainedConfig):
    model_type = "markuplm"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        bos_token_id=0,
        eos_token_id=2,
        max_xpath_tag_unit_embeddings=256,
        max_xpath_subs_unit_embeddings=1024,
        tag_pad_id=216,
        subs_pad_id=1001,
        xpath_unit_hidden_size=32,
        max_depth=50,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            **kwargs,
        )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
        # additional properties
        self.max_depth = max_depth
        self.max_xpath_tag_unit_embeddings = max_xpath_tag_unit_embeddings
        self.max_xpath_subs_unit_embeddings = max_xpath_subs_unit_embeddings
        self.tag_pad_id = tag_pad_id
        self.subs_pad_id = subs_pad_id
        self.xpath_unit_hidden_size = xpath_unit_hidden_size
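# Example usage (illustrative sketch):
#
#     config = MarkupLMConfig()  # markuplm-base style defaults
#     config.xpath_unit_hidden_size  # -> 32
#     small_config = MarkupLMConfig(num_hidden_layers=4, hidden_size=256)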
| 44
|
import tempfile
import unittest
import numpy as np
import transformers
from transformers import GPTaTokenizer, GPTJConfig, is_flax_available, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_flax, tooslow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.modeling_flax_pytorch_utils import (
convert_pytorch_state_dict_to_flax,
load_flax_weights_in_pytorch_model,
)
from transformers.models.gptj.modeling_flax_gptj import FlaxGPTJForCausalLM, FlaxGPTJModel
if is_torch_available():
import torch
class FlaxGPTJModelTester:
    def __init__(
        self,
        parent,
        batch_size=14,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        rotary_dim=4,
        num_hidden_layers=4,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        initializer_range=0.02,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.rotary_dim = rotary_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.scope = None
        self.bos_token_id = vocab_size - 1
        self.eos_token_id = vocab_size - 1
        self.pad_token_id = vocab_size - 1

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        config = GPTJConfig(
            vocab_size=self.vocab_size,
            n_embd=self.hidden_size,
            n_layer=self.num_hidden_layers,
            n_head=self.num_attention_heads,
            n_positions=self.max_position_embeddings,
            use_cache=False,
            bos_token_id=self.bos_token_id,
            eos_token_id=self.eos_token_id,
            pad_token_id=self.pad_token_id,
            rotary_dim=self.rotary_dim,
        )

        return (config, input_ids, input_mask)

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": attention_mask}
        return config, inputs_dict

    def check_use_cache_forward(self, model_class_name, config, input_ids, attention_mask):
        max_decoder_length = 20
        model = model_class_name(config)

        past_key_values = model.init_cache(input_ids.shape[0], max_decoder_length)
        attention_mask = jnp.ones((input_ids.shape[0], max_decoder_length), dtype="i4")

        position_ids = jnp.broadcast_to(
            jnp.arange(input_ids.shape[-1] - 1)[None, :], (input_ids.shape[0], input_ids.shape[-1] - 1)
        )
        outputs_cache = model(
            input_ids[:, :-1],
            attention_mask=attention_mask,
            past_key_values=past_key_values,
            position_ids=position_ids,
        )

        position_ids = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model(
            input_ids[:, -1:],
            attention_mask=attention_mask,
            past_key_values=outputs_cache.past_key_values,
            position_ids=position_ids,
        )

        outputs = model(input_ids)

        diff = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5])))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")

    def check_use_cache_forward_with_attn_mask(self, model_class_name, config, input_ids, attention_mask):
        max_decoder_length = 20
        model = model_class_name(config)

        attention_mask_cache = jnp.concatenate(
            [attention_mask, jnp.zeros((attention_mask.shape[0], max_decoder_length - attention_mask.shape[1]))],
            axis=-1,
        )

        past_key_values = model.init_cache(input_ids.shape[0], max_decoder_length)
        position_ids = jnp.broadcast_to(
            jnp.arange(input_ids.shape[-1] - 1)[None, :], (input_ids.shape[0], input_ids.shape[-1] - 1)
        )

        outputs_cache = model(
            input_ids[:, :-1],
            attention_mask=attention_mask_cache,
            past_key_values=past_key_values,
            position_ids=position_ids,
        )
        position_ids = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model(
            input_ids[:, -1:],
            past_key_values=outputs_cache.past_key_values,
            attention_mask=attention_mask_cache,
            position_ids=position_ids,
        )

        outputs = model(input_ids, attention_mask=attention_mask)

        diff = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5])))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")


@require_flax
class FlaxGPTJModelTest(FlaxModelTesterMixin, FlaxGenerationTesterMixin, unittest.TestCase):
    all_model_classes = (FlaxGPTJModel, FlaxGPTJForCausalLM) if is_flax_available() else ()
    all_generative_model_classes = (FlaxGPTJForCausalLM,) if is_flax_available() else ()

    def setUp(self):
        self.model_tester = FlaxGPTJModelTester(self)

    def test_use_cache_forward(self):
        for model_class_name in self.all_model_classes:
            config, input_ids, attention_mask = self.model_tester.prepare_config_and_inputs()
            self.model_tester.check_use_cache_forward(model_class_name, config, input_ids, attention_mask)

    def test_use_cache_forward_with_attn_mask(self):
        for model_class_name in self.all_model_classes:
            config, input_ids, attention_mask = self.model_tester.prepare_config_and_inputs()
            self.model_tester.check_use_cache_forward_with_attn_mask(
                model_class_name, config, input_ids, attention_mask
            )

    @tooslow
    def test_batch_generation(self):
        tokenizer = GPTaTokenizer.from_pretrained("gpt2", pad_token="<|endoftext|>", padding_side="left")
        inputs = tokenizer(["Hello this is a long string", "Hey"], return_tensors="np", padding=True, truncation=True)

        model = FlaxGPTJForCausalLM.from_pretrained("EleutherAI/gpt-j-6B")
        model.do_sample = False
        model.config.pad_token_id = model.config.eos_token_id

        jit_generate = jax.jit(model.generate)

        output_sequences = jit_generate(
            inputs["input_ids"], attention_mask=inputs["attention_mask"], pad_token_id=tokenizer.pad_token_id
        ).sequences

        output_string = tokenizer.batch_decode(output_sequences, skip_special_tokens=True)

        expected_string = [
            "Hello this is a long string of text.\n\nI'm trying to get the text of the",
            "Hey, I'm a little late to the party. I'm going to",
        ]

        self.assertListEqual(output_string, expected_string)

    @is_pt_flax_cross_test
    def test_equivalence_pt_to_flax(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                # prepare inputs
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                pt_inputs = {k: torch.tensor(v.tolist()) for k, v in prepared_inputs_dict.items()}

                # load corresponding PyTorch class
                pt_model_class_name = model_class.__name__[4:]  # Skip the "Flax" at the beginning
                pt_model_class = getattr(transformers, pt_model_class_name)

                batch_size, seq_length = pt_inputs["input_ids"].shape
                rnd_start_indices = np.random.randint(0, seq_length - 1, size=(batch_size,))
                for batch_idx, start_index in enumerate(rnd_start_indices):
                    pt_inputs["attention_mask"][batch_idx, :start_index] = 0
                    pt_inputs["attention_mask"][batch_idx, start_index:] = 1
                    prepared_inputs_dict["attention_mask"][batch_idx, :start_index] = 0
                    prepared_inputs_dict["attention_mask"][batch_idx, start_index:] = 1
                pt_model = pt_model_class(config).eval()
                fx_model = model_class(config, dtype=jnp.float32)

                fx_state = convert_pytorch_state_dict_to_flax(pt_model.state_dict(), fx_model)
                fx_model.params = fx_state

                with torch.no_grad():
                    pt_outputs = pt_model(**pt_inputs).to_tuple()

                fx_outputs = fx_model(**prepared_inputs_dict).to_tuple()
                self.assertEqual(len(fx_outputs), len(pt_outputs), "Output lengths differ between Flax and PyTorch")
                for fx_output, pt_output in zip(fx_outputs, pt_outputs):
                    self.assert_almost_equals(fx_output[:, -1], pt_output[:, -1].numpy(), 4e-2)

                with tempfile.TemporaryDirectory() as tmpdirname:
                    pt_model.save_pretrained(tmpdirname)
                    fx_model_loaded = model_class.from_pretrained(tmpdirname, from_pt=True)

                fx_outputs_loaded = fx_model_loaded(**prepared_inputs_dict).to_tuple()
                self.assertEqual(
                    len(fx_outputs_loaded), len(pt_outputs), "Output lengths differ between Flax and PyTorch"
                )
                for fx_output_loaded, pt_output in zip(fx_outputs_loaded, pt_outputs):
                    self.assert_almost_equals(fx_output_loaded[:, -1], pt_output[:, -1].numpy(), 4e-2)

    @is_pt_flax_cross_test
    def test_equivalence_flax_to_pt(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                # prepare inputs
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                pt_inputs = {k: torch.tensor(v.tolist()) for k, v in prepared_inputs_dict.items()}

                # load corresponding PyTorch class
                pt_model_class_name = model_class.__name__[4:]  # Skip the "Flax" at the beginning
                pt_model_class = getattr(transformers, pt_model_class_name)

                pt_model = pt_model_class(config).eval()
                fx_model = model_class(config, dtype=jnp.float32)

                pt_model = load_flax_weights_in_pytorch_model(pt_model, fx_model.params)
                batch_size, seq_length = pt_inputs["input_ids"].shape
                rnd_start_indices = np.random.randint(0, seq_length - 1, size=(batch_size,))
                for batch_idx, start_index in enumerate(rnd_start_indices):
                    pt_inputs["attention_mask"][batch_idx, :start_index] = 0
                    pt_inputs["attention_mask"][batch_idx, start_index:] = 1
                    prepared_inputs_dict["attention_mask"][batch_idx, :start_index] = 0
                    prepared_inputs_dict["attention_mask"][batch_idx, start_index:] = 1

                # make sure weights are tied in PyTorch
                pt_model.tie_weights()

                with torch.no_grad():
                    pt_outputs = pt_model(**pt_inputs).to_tuple()

                fx_outputs = fx_model(**prepared_inputs_dict).to_tuple()
                self.assertEqual(len(fx_outputs), len(pt_outputs), "Output lengths differ between Flax and PyTorch")
                for fx_output, pt_output in zip(fx_outputs, pt_outputs):
                    self.assert_almost_equals(fx_output[:, -1], pt_output[:, -1].numpy(), 4e-2)

                with tempfile.TemporaryDirectory() as tmpdirname:
                    fx_model.save_pretrained(tmpdirname)
                    pt_model_loaded = pt_model_class.from_pretrained(tmpdirname, from_flax=True)

                with torch.no_grad():
                    pt_outputs_loaded = pt_model_loaded(**pt_inputs).to_tuple()

                self.assertEqual(
                    len(fx_outputs), len(pt_outputs_loaded), "Output lengths differ between Flax and PyTorch"
                )
                for fx_output, pt_output in zip(fx_outputs, pt_outputs_loaded):
                    self.assert_almost_equals(fx_output[:, -1], pt_output[:, -1].numpy(), 4e-2)

    @tooslow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("EleutherAI/gpt-j-6B")
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
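# Example usage outside the test harness (illustrative sketch; downloads the full
# GPT-J checkpoint, so it is only practical on a machine with enough memory):
#
#     model = FlaxGPTJModel.from_pretrained("EleutherAI/gpt-j-6B")
#     outputs = model(np.ones((1, 1), dtype="i4"))
#     outputs.last_hidden_state.shape  # (1, 1, hidden_size)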
| 442
| 0
|
"""simple docstring"""
from typing import Optional
import torch
import torch.utils.checkpoint
from torch import Tensor, nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACTaFN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_outputs import (
BaseModelOutputWithNoAttention,
BaseModelOutputWithPoolingAndNoAttention,
ImageClassifierOutputWithNoAttention,
)
from ...modeling_utils import PreTrainedModel
from ...utils import logging
from .configuration_regnet import RegNetConfig
_SCREAMING_SNAKE_CASE : Optional[int] = logging.get_logger(__name__)
# General docstring
_SCREAMING_SNAKE_CASE : Any = '''RegNetConfig'''
# Base docstring
_SCREAMING_SNAKE_CASE : Tuple = '''facebook/regnet-y-040'''
_SCREAMING_SNAKE_CASE : Tuple = [1, 1088, 7, 7]
# Image classification docstring
_SCREAMING_SNAKE_CASE : Dict = '''facebook/regnet-y-040'''
_SCREAMING_SNAKE_CASE : Optional[int] = '''tabby, tabby cat'''
_SCREAMING_SNAKE_CASE : List[str] = [
'''facebook/regnet-y-040''',
# See all regnet models at https://huggingface.co/models?filter=regnet
]
class a ( nn.Module ):
def __init__( self : List[Any] , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : int = 3 , __SCREAMING_SNAKE_CASE : int = 1 , __SCREAMING_SNAKE_CASE : int = 1 , __SCREAMING_SNAKE_CASE : Optional[str] = "relu" , ) -> int:
super().__init__()
lowerCamelCase_ = nn.Convad(
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , kernel_size=__SCREAMING_SNAKE_CASE , stride=__SCREAMING_SNAKE_CASE , padding=kernel_size // 2 , groups=__SCREAMING_SNAKE_CASE , bias=__SCREAMING_SNAKE_CASE , )
lowerCamelCase_ = nn.BatchNormad(__SCREAMING_SNAKE_CASE )
lowerCamelCase_ = ACTaFN[activation] if activation is not None else nn.Identity()
def UpperCamelCase ( self : Optional[Any] , __SCREAMING_SNAKE_CASE : Optional[int] ) -> List[Any]:
lowerCamelCase_ = self.convolution(__SCREAMING_SNAKE_CASE )
lowerCamelCase_ = self.normalization(__SCREAMING_SNAKE_CASE )
lowerCamelCase_ = self.activation(__SCREAMING_SNAKE_CASE )
return hidden_state
class a ( nn.Module ):
def __init__( self : int , __SCREAMING_SNAKE_CASE : RegNetConfig ) -> Optional[Any]:
super().__init__()
lowerCamelCase_ = RegNetConvLayer(
config.num_channels , config.embedding_size , kernel_size=3 , stride=2 , activation=config.hidden_act )
lowerCamelCase_ = config.num_channels
def UpperCamelCase ( self : Tuple , __SCREAMING_SNAKE_CASE : List[str] ) -> Any:
lowerCamelCase_ = pixel_values.shape[1]
if num_channels != self.num_channels:
raise ValueError(
'Make sure that the channel dimension of the pixel values match with the one set in the configuration.' )
lowerCamelCase_ = self.embedder(__SCREAMING_SNAKE_CASE )
return hidden_state
class a ( nn.Module ):
def __init__( self : str , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : int = 2 ) -> Optional[int]:
super().__init__()
lowerCamelCase_ = nn.Convad(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , kernel_size=1 , stride=__SCREAMING_SNAKE_CASE , bias=__SCREAMING_SNAKE_CASE )
lowerCamelCase_ = nn.BatchNormad(__SCREAMING_SNAKE_CASE )
def UpperCamelCase ( self : Tuple , __SCREAMING_SNAKE_CASE : Tensor ) -> Tensor:
lowerCamelCase_ = self.convolution(__SCREAMING_SNAKE_CASE )
lowerCamelCase_ = self.normalization(__SCREAMING_SNAKE_CASE )
return hidden_state
class a ( nn.Module ):
def __init__( self : Optional[Any] , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : int ) -> Dict:
super().__init__()
lowerCamelCase_ = nn.AdaptiveAvgPoolad((1, 1) )
lowerCamelCase_ = nn.Sequential(
nn.Convad(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , kernel_size=1 ) , nn.ReLU() , nn.Convad(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , kernel_size=1 ) , nn.Sigmoid() , )
def UpperCamelCase ( self : List[Any] , __SCREAMING_SNAKE_CASE : Any ) -> Any:
# b c h w -> b c 1 1
lowerCamelCase_ = self.pooler(__SCREAMING_SNAKE_CASE )
lowerCamelCase_ = self.attention(__SCREAMING_SNAKE_CASE )
lowerCamelCase_ = hidden_state * attention
return hidden_state
class a ( nn.Module ):
def __init__( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : RegNetConfig , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : int = 1 ) -> Optional[Any]:
super().__init__()
lowerCamelCase_ = in_channels != out_channels or stride != 1
lowerCamelCase_ = max(1 , out_channels // config.groups_width )
lowerCamelCase_ = (
RegNetShortCut(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , stride=__SCREAMING_SNAKE_CASE ) if should_apply_shortcut else nn.Identity()
)
lowerCamelCase_ = nn.Sequential(
RegNetConvLayer(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , kernel_size=1 , activation=config.hidden_act ) , RegNetConvLayer(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , stride=__SCREAMING_SNAKE_CASE , groups=__SCREAMING_SNAKE_CASE , activation=config.hidden_act ) , RegNetConvLayer(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , kernel_size=1 , activation=__SCREAMING_SNAKE_CASE ) , )
lowerCamelCase_ = ACTaFN[config.hidden_act]
def UpperCamelCase ( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : Any ) -> Union[str, Any]:
lowerCamelCase_ = hidden_state
lowerCamelCase_ = self.layer(__SCREAMING_SNAKE_CASE )
lowerCamelCase_ = self.shortcut(__SCREAMING_SNAKE_CASE )
hidden_state += residual
lowerCamelCase_ = self.activation(__SCREAMING_SNAKE_CASE )
return hidden_state
class a ( nn.Module ):
def __init__( self : Any , __SCREAMING_SNAKE_CASE : RegNetConfig , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : int = 1 ) -> int:
super().__init__()
lowerCamelCase_ = in_channels != out_channels or stride != 1
lowerCamelCase_ = max(1 , out_channels // config.groups_width )
lowerCamelCase_ = (
RegNetShortCut(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , stride=__SCREAMING_SNAKE_CASE ) if should_apply_shortcut else nn.Identity()
)
lowerCamelCase_ = nn.Sequential(
RegNetConvLayer(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , kernel_size=1 , activation=config.hidden_act ) , RegNetConvLayer(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , stride=__SCREAMING_SNAKE_CASE , groups=__SCREAMING_SNAKE_CASE , activation=config.hidden_act ) , RegNetSELayer(__SCREAMING_SNAKE_CASE , reduced_channels=int(round(in_channels / 4 ) ) ) , RegNetConvLayer(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , kernel_size=1 , activation=__SCREAMING_SNAKE_CASE ) , )
lowerCamelCase_ = ACTaFN[config.hidden_act]
def UpperCamelCase ( self : str , __SCREAMING_SNAKE_CASE : List[str] ) -> Dict:
lowerCamelCase_ = hidden_state
lowerCamelCase_ = self.layer(__SCREAMING_SNAKE_CASE )
lowerCamelCase_ = self.shortcut(__SCREAMING_SNAKE_CASE )
hidden_state += residual
lowerCamelCase_ = self.activation(__SCREAMING_SNAKE_CASE )
return hidden_state
class a ( nn.Module ):
def __init__( self : List[str] , __SCREAMING_SNAKE_CASE : RegNetConfig , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : int = 2 , __SCREAMING_SNAKE_CASE : int = 2 , ) -> List[str]:
super().__init__()
lowerCamelCase_ = RegNetXLayer if config.layer_type == 'x' else RegNetYLayer
lowerCamelCase_ = nn.Sequential(
# downsampling is done in the first layer with stride of 2
layer(
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , stride=__SCREAMING_SNAKE_CASE , ) , *[layer(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) for _ in range(depth - 1 )] , )
def UpperCamelCase ( self : Optional[int] , __SCREAMING_SNAKE_CASE : Any ) -> Any:
lowerCamelCase_ = self.layers(__SCREAMING_SNAKE_CASE )
return hidden_state
class a ( nn.Module ):
def __init__( self : Tuple , __SCREAMING_SNAKE_CASE : RegNetConfig ) -> Union[str, Any]:
super().__init__()
lowerCamelCase_ = nn.ModuleList([] )
# based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
self.stages.append(
RegNetStage(
__SCREAMING_SNAKE_CASE , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , ) )
lowerCamelCase_ = zip(config.hidden_sizes , config.hidden_sizes[1:] )
for (in_channels, out_channels), depth in zip(__SCREAMING_SNAKE_CASE , config.depths[1:] ):
self.stages.append(RegNetStage(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , depth=__SCREAMING_SNAKE_CASE ) )
def UpperCamelCase ( self : Optional[int] , __SCREAMING_SNAKE_CASE : Tensor , __SCREAMING_SNAKE_CASE : bool = False , __SCREAMING_SNAKE_CASE : bool = True ) -> BaseModelOutputWithNoAttention:
lowerCamelCase_ = () if output_hidden_states else None
for stage_module in self.stages:
if output_hidden_states:
lowerCamelCase_ = hidden_states + (hidden_state,)
lowerCamelCase_ = stage_module(__SCREAMING_SNAKE_CASE )
if output_hidden_states:
lowerCamelCase_ = hidden_states + (hidden_state,)
if not return_dict:
return tuple(v for v in [hidden_state, hidden_states] if v is not None )
return BaseModelOutputWithNoAttention(last_hidden_state=__SCREAMING_SNAKE_CASE , hidden_states=__SCREAMING_SNAKE_CASE )
class a ( __snake_case ):
SCREAMING_SNAKE_CASE : List[str] = RegNetConfig
SCREAMING_SNAKE_CASE : List[str] = """regnet"""
SCREAMING_SNAKE_CASE : List[Any] = """pixel_values"""
SCREAMING_SNAKE_CASE : Optional[Any] = True
def UpperCamelCase ( self : int , __SCREAMING_SNAKE_CASE : List[Any] ) -> int:
if isinstance(__SCREAMING_SNAKE_CASE , nn.Convad ):
nn.init.kaiming_normal_(module.weight , mode='fan_out' , nonlinearity='relu' )
elif isinstance(__SCREAMING_SNAKE_CASE , (nn.BatchNormad, nn.GroupNorm) ):
nn.init.constant_(module.weight , 1 )
nn.init.constant_(module.bias , 0 )
def UpperCamelCase ( self : str , __SCREAMING_SNAKE_CASE : Dict , __SCREAMING_SNAKE_CASE : str=False ) -> Tuple:
if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
lowerCamelCase_ = value
_SCREAMING_SNAKE_CASE : Tuple = R'''
This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
behavior.
Parameters:
config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
'''
_SCREAMING_SNAKE_CASE : int = R'''
Args:
pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
[`ConvNextImageProcessor.__call__`] for details.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple.
'''
@add_start_docstrings(
"""The bare RegNet model outputting raw features without any specific head on top.""" , __snake_case , )
# Copied from transformers.models.resnet.modeling_resnet.ResNetModel with RESNET->REGNET,ResNet->RegNet
class a ( __snake_case ):
def __init__( self : Tuple , __SCREAMING_SNAKE_CASE : Dict ) -> int:
super().__init__(__SCREAMING_SNAKE_CASE )
lowerCamelCase_ = config
lowerCamelCase_ = RegNetEmbeddings(__SCREAMING_SNAKE_CASE )
lowerCamelCase_ = RegNetEncoder(__SCREAMING_SNAKE_CASE )
lowerCamelCase_ = nn.AdaptiveAvgPoolad((1, 1) )
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(__SCREAMING_SNAKE_CASE )
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC , output_type=__SCREAMING_SNAKE_CASE , config_class=_CONFIG_FOR_DOC , modality='vision' , expected_output=_EXPECTED_OUTPUT_SHAPE , )
def UpperCamelCase ( self : Any , __SCREAMING_SNAKE_CASE : Tensor , __SCREAMING_SNAKE_CASE : Optional[bool] = None , __SCREAMING_SNAKE_CASE : Optional[bool] = None ) -> BaseModelOutputWithPoolingAndNoAttention:
lowerCamelCase_ = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
lowerCamelCase_ = return_dict if return_dict is not None else self.config.use_return_dict
lowerCamelCase_ = self.embedder(__SCREAMING_SNAKE_CASE )
lowerCamelCase_ = self.encoder(
__SCREAMING_SNAKE_CASE , output_hidden_states=__SCREAMING_SNAKE_CASE , return_dict=__SCREAMING_SNAKE_CASE )
lowerCamelCase_ = encoder_outputs[0]
lowerCamelCase_ = self.pooler(__SCREAMING_SNAKE_CASE )
if not return_dict:
return (last_hidden_state, pooled_output) + encoder_outputs[1:]
return BaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=__SCREAMING_SNAKE_CASE , pooler_output=__SCREAMING_SNAKE_CASE , hidden_states=encoder_outputs.hidden_states , )
@add_start_docstrings(
"""
RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
ImageNet.
""" , __snake_case , )
# Copied from transformers.models.resnet.modeling_resnet.ResNetForImageClassification with RESNET->REGNET,ResNet->RegNet,resnet->regnet
class a ( __snake_case ):
def __init__( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : Dict ) -> int:
super().__init__(__SCREAMING_SNAKE_CASE )
lowerCamelCase_ = config.num_labels
lowerCamelCase_ = RegNetModel(__SCREAMING_SNAKE_CASE )
# classification head
lowerCamelCase_ = nn.Sequential(
nn.Flatten() , nn.Linear(config.hidden_sizes[-1] , config.num_labels ) if config.num_labels > 0 else nn.Identity() , )
# initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(__SCREAMING_SNAKE_CASE )
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=__SCREAMING_SNAKE_CASE , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
def UpperCamelCase ( self : List[Any] , __SCREAMING_SNAKE_CASE : Optional[torch.FloatTensor] = None , __SCREAMING_SNAKE_CASE : Optional[torch.LongTensor] = None , __SCREAMING_SNAKE_CASE : Optional[bool] = None , __SCREAMING_SNAKE_CASE : Optional[bool] = None , ) -> ImageClassifierOutputWithNoAttention:
lowerCamelCase_ = return_dict if return_dict is not None else self.config.use_return_dict
lowerCamelCase_ = self.regnet(__SCREAMING_SNAKE_CASE , output_hidden_states=__SCREAMING_SNAKE_CASE , return_dict=__SCREAMING_SNAKE_CASE )
lowerCamelCase_ = outputs.pooler_output if return_dict else outputs[1]
lowerCamelCase_ = self.classifier(__SCREAMING_SNAKE_CASE )
lowerCamelCase_ = None
if labels is not None:
if self.config.problem_type is None:
if self.num_labels == 1:
lowerCamelCase_ = 'regression'
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
lowerCamelCase_ = 'single_label_classification'
else:
lowerCamelCase_ = 'multi_label_classification'
if self.config.problem_type == "regression":
lowerCamelCase_ = MSELoss()
if self.num_labels == 1:
lowerCamelCase_ = loss_fct(logits.squeeze() , labels.squeeze() )
else:
lowerCamelCase_ = loss_fct(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
elif self.config.problem_type == "single_label_classification":
lowerCamelCase_ = CrossEntropyLoss()
lowerCamelCase_ = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
elif self.config.problem_type == "multi_label_classification":
lowerCamelCase_ = BCEWithLogitsLoss()
lowerCamelCase_ = loss_fct(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
if not return_dict:
lowerCamelCase_ = (logits,) + outputs[2:]
return (loss,) + output if loss is not None else output
return ImageClassifierOutputWithNoAttention(loss=__SCREAMING_SNAKE_CASE , logits=__SCREAMING_SNAKE_CASE , hidden_states=outputs.hidden_states )
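# Example usage (illustrative sketch, mirroring the docstring checkpoint above;
# `image` is assumed to be a PIL image):
#
#     from transformers import AutoImageProcessor, RegNetForImageClassification
#
#     processor = AutoImageProcessor.from_pretrained("facebook/regnet-y-040")
#     model = RegNetForImageClassification.from_pretrained("facebook/regnet-y-040")
#     inputs = processor(images=image, return_tensors="pt")
#     with torch.no_grad():
#         logits = model(**inputs).logits
#     predicted_label = model.config.id2label[logits.argmax(-1).item()]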
| 137
|
"""simple docstring"""
from math import asin, atan, cos, radians, sin, sqrt, tan
_SCREAMING_SNAKE_CASE : Dict = 637_8137.0
_SCREAMING_SNAKE_CASE : Any = 635_6752.31_4245
_SCREAMING_SNAKE_CASE : List[Any] = 637_8137
def lowerCamelCase__ ( _lowerCamelCase : float , _lowerCamelCase : float , _lowerCamelCase : float , _lowerCamelCase : float ) -> float:
lowerCamelCase_ = (AXIS_A - AXIS_B) / AXIS_A
lowerCamelCase_ = atan((1 - flattening) * tan(radians(_lowerCamelCase ) ) )
lowerCamelCase_ = atan((1 - flattening) * tan(radians(_lowerCamelCase ) ) )
lowerCamelCase_ = radians(_lowerCamelCase )
lowerCamelCase_ = radians(_lowerCamelCase )
# Equation
lowerCamelCase_ = sin((phi_a - phi_a) / 2 )
lowerCamelCase_ = sin((lambda_a - lambda_a) / 2 )
# Square both values
sin_sq_phi *= sin_sq_phi
sin_sq_lambda *= sin_sq_lambda
lowerCamelCase_ = sqrt(sin_sq_phi + (cos(_lowerCamelCase ) * cos(_lowerCamelCase ) * sin_sq_lambda) )
return 2 * RADIUS * asin(_lowerCamelCase )
if __name__ == "__main__":
import doctest
doctest.testmod()
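# Example usage (illustrative sketch; coordinates are San Francisco and Yosemite,
# and the result is in meters, roughly 254 km):
#
#     SAN_FRANCISCO = (37.774856, -122.424227)
#     YOSEMITE = (37.864742, -119.537521)
#     haversine_distance(*SAN_FRANCISCO, *YOSEMITE)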
| 137
| 1
|
from dataclasses import dataclass, field
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union

import pyarrow as pa


if TYPE_CHECKING:
    from .features import FeatureType


@dataclass
class Translation:
    """`FeatureType` for translations with a fixed set of languages per example."""

    languages: List[str]
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "dict"
    pa_type: ClassVar[Any] = None
    _type: str = field(default="Translation", init=False, repr=False)

    def __call__(self):
        return pa.struct({lang: pa.string() for lang in sorted(self.languages)})

    def flatten(self) -> Union["FeatureType", Dict[str, "FeatureType"]]:
        """Flatten the Translation feature into a dictionary."""
        from .features import Value

        return {k: Value("string") for k in sorted(self.languages)}


@dataclass
class TranslationVariableLanguages:
    """`FeatureType` for translations with a variable set of languages per example."""

    languages: Optional[List] = None
    num_languages: Optional[int] = None
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "dict"
    pa_type: ClassVar[Any] = None
    _type: str = field(default="TranslationVariableLanguages", init=False, repr=False)

    def __post_init__(self):
        self.languages = sorted(set(self.languages)) if self.languages else None
        self.num_languages = len(self.languages) if self.languages else None

    def __call__(self):
        return pa.struct({"language": pa.list_(pa.string()), "translation": pa.list_(pa.string())})

    def encode_example(self, translation_dict):
        lang_set = set(self.languages)
        if self.languages and set(translation_dict) - lang_set:
            raise ValueError(
                f"Some languages in example ({', '.join(sorted(set(translation_dict) - lang_set))}) "
                f"are not in valid set ({', '.join(lang_set)})."
            )

        # Convert dictionary into tuples, splitting out cases where there are
        # multiple translations for a single language.
        translation_tuples = []
        for lang, text in translation_dict.items():
            if isinstance(text, str):
                translation_tuples.append((lang, text))
            else:
                translation_tuples.extend([(lang, el) for el in text])

        # Ensure translations are in ascending order by language code.
        languages, translations = zip(*sorted(translation_tuples))

        return {"language": languages, "translation": translations}

    def flatten(self) -> Union["FeatureType", Dict[str, "FeatureType"]]:
        """Flatten the TranslationVariableLanguages feature into a dictionary."""
        from .features import Sequence, Value

        return {
            "language": Sequence(Value("string")),
            "translation": Sequence(Value("string")),
        }
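# Example usage (illustrative sketch):
#
#     feature = TranslationVariableLanguages(languages=["de", "en", "fr"])
#     feature.encode_example({"en": "the cat", "fr": ["le chat", "la chatte"], "de": "die katze"})
#     # -> {"language": ("de", "en", "fr", "fr"),
#     #     "translation": ("die katze", "the cat", "la chatte", "le chat")}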
| 38
|
import argparse
import json
import math
import os
import time
import traceback
import zipfile
from collections import Counter

import requests


def get_job_links(workflow_run_id, token=None):
    """Extract job names and their links in a GitHub Actions workflow run."""
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}

    url = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100"
    result = requests.get(url, headers=headers).json()
    job_links = {}

    try:
        job_links.update({job["name"]: job["html_url"] for job in result["jobs"]})
        pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100)

        for i in range(pages_to_iterate_over):
            result = requests.get(url + f"&page={i + 2}", headers=headers).json()
            job_links.update({job["name"]: job["html_url"] for job in result["jobs"]})

        return job_links
    except Exception:
        print(f"Unknown error, could not fetch links:\n{traceback.format_exc()}")

    return {}


def get_artifacts_links(workflow_run_id, token=None):
    """Get all artifact links from a workflow run."""
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}

    url = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/artifacts?per_page=100"
    result = requests.get(url, headers=headers).json()
    artifacts = {}

    try:
        artifacts.update({artifact["name"]: artifact["archive_download_url"] for artifact in result["artifacts"]})
        pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100)

        for i in range(pages_to_iterate_over):
            result = requests.get(url + f"&page={i + 2}", headers=headers).json()
            artifacts.update({artifact["name"]: artifact["archive_download_url"] for artifact in result["artifacts"]})

        return artifacts
    except Exception:
        print(f"Unknown error, could not fetch links:\n{traceback.format_exc()}")

    return {}


def download_artifact(artifact_name, artifact_url, output_dir, token):
    """Download a GitHub Actions artifact.

    The artifact URL can't be used to download directly: we first need to get a
    redirect URL. See https://docs.github.com/en/rest/actions/artifacts#download-an-artifact
    """
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}

    result = requests.get(artifact_url, headers=headers, allow_redirects=False)
    download_url = result.headers["Location"]
    response = requests.get(download_url, allow_redirects=True)
    file_path = os.path.join(output_dir, f"{artifact_name}.zip")
    with open(file_path, "wb") as fp:
        fp.write(response.content)


def get_errors_from_single_artifact(artifact_zip_path, job_links=None):
    """Extract errors from a downloaded artifact (in .zip format)."""
    errors = []
    failed_tests = []
    job_name = None

    with zipfile.ZipFile(artifact_zip_path) as z:
        for filename in z.namelist():
            if not os.path.isdir(filename):
                # read the file
                if filename in ["failures_line.txt", "summary_short.txt", "job_name.txt"]:
                    with z.open(filename) as f:
                        for line in f:
                            line = line.decode("UTF-8").strip()
                            if filename == "failures_line.txt":
                                try:
                                    # `error_line` is the place where `error` occurs
                                    error_line = line[: line.index(": ")]
                                    error = line[line.index(": ") + len(": ") :]
                                    errors.append([error_line, error])
                                except Exception:
                                    # skip un-related lines
                                    pass
                            elif filename == "summary_short.txt" and line.startswith("FAILED "):
                                # `test` is the test method that failed
                                test = line[len("FAILED ") :]
                                failed_tests.append(test)
                            elif filename == "job_name.txt":
                                job_name = line

    if len(errors) != len(failed_tests):
        raise ValueError(
            f"`errors` and `failed_tests` should have the same number of elements. Got {len(errors)} for `errors` "
            f"and {len(failed_tests)} for `failed_tests` instead. The test reports in {artifact_zip_path} have some"
            " problem."
        )

    job_link = None
    if job_name and job_links:
        job_link = job_links.get(job_name, None)

    # A list with elements of the form (line of error, error, failed test)
    result = [x + [y] + [job_link] for x, y in zip(errors, failed_tests)]

    return result


def get_all_errors(artifact_dir, job_links=None):
    """Extract errors from all artifact files in `artifact_dir`."""
    errors = []

    paths = [os.path.join(artifact_dir, p) for p in os.listdir(artifact_dir) if p.endswith(".zip")]

    for p in paths:
        errors.extend(get_errors_from_single_artifact(p, job_links=job_links))

    return errors


def reduce_by_error(logs, error_filter=None):
    """Count each error and attach the failed tests it occurred in."""
    counter = Counter()
    counter.update([x[1] for x in logs])
    counts = counter.most_common()
    r = {}
    for error, count in counts:
        if error_filter is None or error not in error_filter:
            r[error] = {"count": count, "failed_tests": [(x[2], x[0]) for x in logs if x[1] == error]}

    r = dict(sorted(r.items(), key=lambda item: item[1]["count"], reverse=True))
    return r


def get_model(test):
    """Get the model name from a test method path like `tests/models/<model>/...`."""
    test = test.split("::")[0]
    if test.startswith("tests/models/"):
        test = test.split("/")[2]
    else:
        test = None

    return test


def reduce_by_model(logs, error_filter=None):
    """Count each error per model."""
    logs = [(x[0], x[1], get_model(x[2])) for x in logs]
    logs = [x for x in logs if x[2] is not None]
    tests = {x[2] for x in logs}

    r = {}
    for test in tests:
        counter = Counter()
        # count by errors in `test`
        counter.update([x[1] for x in logs if x[2] == test])
        counts = counter.most_common()
        error_counts = {error: count for error, count in counts if (error_filter is None or error not in error_filter)}
        n_errors = sum(error_counts.values())
        if n_errors > 0:
            r[test] = {"count": n_errors, "errors": error_counts}

    r = dict(sorted(r.items(), key=lambda item: item[1]["count"], reverse=True))
    return r


def make_github_table(reduced_by_error):
    header = "| no. | error | status |"
    sep = "|-:|:-|:-|"
    lines = [header, sep]
    for error in reduced_by_error:
        count = reduced_by_error[error]["count"]
        line = f"| {count} | {error[:100]} | |"
        lines.append(line)

    return "\n".join(lines)


def make_github_table_per_model(reduced_by_model):
    header = "| model | no. of errors | major error | count |"
    sep = "|-:|-:|-:|-:|"
    lines = [header, sep]
    for model in reduced_by_model:
        count = reduced_by_model[model]["count"]
        error, _count = list(reduced_by_model[model]["errors"].items())[0]
        line = f"| {model} | {count} | {error[:60]} | {_count} |"
        lines.append(line)

    return "\n".join(lines)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("--workflow_run_id", type=str, required=True, help="A GitHub Actions workflow run id.")
    parser.add_argument(
        "--output_dir",
        type=str,
        required=True,
        help="Where to store the downloaded artifacts and other result files.",
    )
    parser.add_argument("--token", default=None, type=str, help="A token that has actions:read permission.")
    args = parser.parse_args()

    os.makedirs(args.output_dir, exist_ok=True)

    _job_links = get_job_links(args.workflow_run_id, token=args.token)
    job_links = {}
    # To deal with `workflow_call` event, where a job name is the combination of the job names in the caller and callee.
    # For example, `PyTorch 1.11 / Model tests (models/albert, single-gpu)`.
    if _job_links:
        for k, v in _job_links.items():
            # This is how GitHub actions combine job names.
            if " / " in k:
                index = k.find(" / ")
                k = k[index + len(" / ") :]
            job_links[k] = v
    with open(os.path.join(args.output_dir, "job_links.json"), "w", encoding="UTF-8") as fp:
        json.dump(job_links, fp, ensure_ascii=False, indent=4)

    artifacts = get_artifacts_links(args.workflow_run_id, token=args.token)
    with open(os.path.join(args.output_dir, "artifacts.json"), "w", encoding="UTF-8") as fp:
        json.dump(artifacts, fp, ensure_ascii=False, indent=4)

    for idx, (name, url) in enumerate(artifacts.items()):
        download_artifact(name, url, args.output_dir, args.token)
        # Be gentle to GitHub
        time.sleep(1)

    errors = get_all_errors(args.output_dir, job_links=job_links)

    # `e[1]` is the error
    counter = Counter()
    counter.update([e[1] for e in errors])

    # print the top 30 most common test errors
    most_common = counter.most_common(30)
    for item in most_common:
        print(item)

    with open(os.path.join(args.output_dir, "errors.json"), "w", encoding="UTF-8") as fp:
        json.dump(errors, fp, ensure_ascii=False, indent=4)

    reduced_by_error = reduce_by_error(errors)
    reduced_by_model = reduce_by_model(errors)

    s1 = make_github_table(reduced_by_error)
    s2 = make_github_table_per_model(reduced_by_model)

    with open(os.path.join(args.output_dir, "reduced_by_error.txt"), "w", encoding="UTF-8") as fp:
        fp.write(s1)
    with open(os.path.join(args.output_dir, "reduced_by_model.txt"), "w", encoding="UTF-8") as fp:
        fp.write(s2)
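# Example invocation (illustrative sketch; the script file name is an assumption):
#
#     python get_ci_error_statistics.py \
#         --workflow_run_id 1234567890 \
#         --output_dir ci_reports \
#         --token <github-token-with-actions:read>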
| 38
| 1
|
'''simple docstring'''
import itertools
import random
import unittest
import numpy as np
from transformers import ASTFeatureExtractor
from transformers.testing_utils import require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
A = random.Random()
if is_torch_available():
import torch
def lowercase_ ( lowercase__ , lowercase__=1.0 , lowercase__=None , lowercase__=None ) ->str:
if rng is None:
_snake_case: Any = global_rng
_snake_case: Dict = []
for batch_idx in range(shape[0] ):
values.append([] )
for _ in range(shape[1] ):
values[-1].append(rng.random() * scale )
return values
class lowerCamelCase ( unittest.TestCase ):
def __init__( self : Optional[int] , __snake_case : Optional[int] , __snake_case : Dict=7 , __snake_case : Optional[Any]=4_00 , __snake_case : List[Any]=20_00 , __snake_case : List[Any]=1 , __snake_case : str=0.0 , __snake_case : str=1_60_00 , __snake_case : int=True , __snake_case : List[Any]=True , ):
'''simple docstring'''
_snake_case: List[Any] = parent
_snake_case: Optional[Any] = batch_size
_snake_case: List[str] = min_seq_length
_snake_case: Optional[int] = max_seq_length
_snake_case: List[str] = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
_snake_case: Tuple = feature_size
_snake_case: Dict = padding_value
_snake_case: Dict = sampling_rate
_snake_case: Optional[int] = return_attention_mask
_snake_case: str = do_normalize
def SCREAMING_SNAKE_CASE_ ( self : List[Any] ):
'''simple docstring'''
return {
"feature_size": self.feature_size,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))

        if equal_length:
            speech_inputs = floats_list((self.batch_size, self.max_seq_length))
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                _flatten(floats_list((x, self.feature_size)))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]
        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]
        return speech_inputs
@require_torch
@require_torchaudio
class ASTFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    feature_extraction_class = ASTFeatureExtractor

    def setUp(self):
        self.feat_extract_tester = ASTFeatureExtractionTester(self)
    def test_call(self):
        # Tests that all call wrap to encode_plus and batch_encode_plus
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        # Test not batched input
        encoded_sequences_1 = feat_extract(speech_inputs[0], return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs[0], return_tensors="np").input_values
        self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1e-3))

        # Test batched
        encoded_sequences_1 = feat_extract(speech_inputs, padding=True, return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs, padding=True, return_tensors="np").input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs)
        encoded_sequences_1 = feat_extract(speech_inputs, return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs, return_tensors="np").input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))
@require_torch
    def test_double_precision_pad(self):
        import torch

        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        np_speech_inputs = np.random.rand(100).astype(np.float64)
        py_speech_inputs = np_speech_inputs.tolist()

        for inputs in [py_speech_inputs, np_speech_inputs]:
            np_processed = feature_extractor.pad([{"input_values": inputs}], return_tensors="np")
            self.assertTrue(np_processed.input_values.dtype == np.float32)
            pt_processed = feature_extractor.pad([{"input_values": inputs}], return_tensors="pt")
            self.assertTrue(pt_processed.input_values.dtype == torch.float32)
    def _load_datasamples(self, num_samples):
        from datasets import load_dataset

        ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        # automatic decoding with librispeech
        speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"]
        return [x["array"] for x in speech_samples]
@require_torch
    def test_integration(self):
        # fmt: off
        EXPECTED_INPUT_VALUES = torch.tensor(
            [-0.9894, -1.2776, -0.9066, -1.2776, -0.9349, -1.2609, -1.0386, -1.2776,
             -1.1561, -1.2776, -1.2052, -1.2723, -1.2190, -1.2132, -1.2776, -1.1133,
             -1.1953, -1.1343, -1.1584, -1.2203, -1.1770, -1.2474, -1.2381, -1.1936,
             -0.9270, -0.8317, -0.8049, -0.7706, -0.7565, -0.7869]
        )
        # fmt: on

        input_speech = self._load_datasamples(1)
        feature_extractor = ASTFeatureExtractor()
        input_values = feature_extractor(input_speech, return_tensors="pt").input_values
        self.assertEqual(input_values.shape, (1, 1024, 128))
        self.assertTrue(torch.allclose(input_values[0, 0, :30], EXPECTED_INPUT_VALUES, atol=1e-4))
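# A minimal usage sketch of the feature extractor exercised above: one second
# of synthetic 16 kHz audio is turned into a (1, 1024, 128) spectrogram-like
# array, matching the shape asserted in the integration test.
def _ast_feature_extractor_demo():
    waveform = np.zeros(16000, dtype=np.float32)  # one second of silence
    extractor = ASTFeatureExtractor()
    features = extractor(waveform, sampling_rate=16000, return_tensors="np").input_values
    return features.shape  # (1, 1024, 128)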
'''simple docstring'''
INSTALL_CONTENT = "\n# Transformers installation\n! pip install transformers datasets\n# To install from source instead of the last release, comment the command above and uncomment the following one.\n# ! pip install git+https://github.com/huggingface/transformers.git\n"

notebook_first_cells = [{"type": "code", "content": INSTALL_CONTENT}]
black_avoid_patterns = {
    "{processor_class}": "FakeProcessorClass",
    "{model_class}": "FakeModelClass",
    "{object_class}": "FakeObjectClass",
}
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import _LazyModule
_import_structure = {"processing_wav2vec2_with_lm": ["Wav2Vec2ProcessorWithLM"]}

if TYPE_CHECKING:
    from .processing_wav2vec2_with_lm import Wav2Vec2ProcessorWithLM
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
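# A minimal sketch (illustrative only, not transformers' implementation) of
# the lazy-import trick used above: the module object in `sys.modules` is
# swapped for one whose attribute access triggers the real import.
import importlib
import types


class _LazyModuleSketch(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        self._class_to_module = {
            cls: mod for mod, classes in import_structure.items() for cls in classes
        }

    def __getattr__(self, name):
        submodule = importlib.import_module("." + self._class_to_module[name], self.__name__)
        return getattr(submodule, name)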
'''simple docstring'''
import unittest
from dataclasses import dataclass
import pytest
from accelerate.commands.config.config_args import SageMakerConfig
from accelerate.utils import ComputeEnvironment
from accelerate.utils.launch import _convert_nargs_to_dict
@dataclass
class MockLaunchConfig(SageMakerConfig):
    compute_environment = ComputeEnvironment.AMAZON_SAGEMAKER
    fp16 = True
    ec2_instance_type = "ml.p3.2xlarge"
    iam_role_name = "accelerate_sagemaker_execution_role"
    profile = "hf-sm"
    region = "us-east-1"
    num_machines = 1
    base_job_name = "accelerate-sagemaker-1"
    pytorch_version = "1.6"
    transformers_version = "4.4"
    training_script = "train.py"
    success_training_script_args = [
        "--model_name_or_path",
        "bert",
        "--do_train",
        "False",
        "--epochs",
        "3",
        "--learning_rate",
        "5e-5",
        "--max_steps",
        "50.5",
    ]
    fail_training_script_args = [
        "--model_name_or_path",
        "bert",
        "--do_train",
        "--do_test",
        "False",
        "--do_predict",
        "--epochs",
        "3",
        "--learning_rate",
        "5e-5",
        "--max_steps",
        "50.5",
    ]
class PassedArgumentsTest(unittest.TestCase):
    def test_args_convert(self):
        converted_args = _convert_nargs_to_dict(MockLaunchConfig.success_training_script_args)
        assert isinstance(converted_args["model_name_or_path"], str)
        assert isinstance(converted_args["do_train"], bool)
        assert isinstance(converted_args["epochs"], int)
        assert isinstance(converted_args["learning_rate"], float)
        assert isinstance(converted_args["max_steps"], float)

        with pytest.raises(ValueError):
            _convert_nargs_to_dict(MockLaunchConfig.fail_training_script_args)
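# A minimal sketch (not accelerate's implementation) of the "--key value"
# to typed-dict conversion the test above exercises: values are cast to int
# or float when possible, and bare flags become booleans.
def _nargs_to_dict_sketch(args):
    result, i = {}, 0
    while i < len(args):
        key = args[i].lstrip("-")
        if i + 1 < len(args) and not args[i + 1].startswith("--"):
            value, i = args[i + 1], i + 2
        else:
            value, i = "True", i + 1  # bare flag
        for cast in (int, float):
            try:
                result[key] = cast(value)
                break
            except ValueError:
                continue
        else:
            result[key] = {"True": True, "False": False}.get(value, value)
    return result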
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyInpaintPipeline, KandinskyPriorPipeline, UNet2DConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class KandinskyInpaintPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyInpaintPipeline
    params = ["prompt", "image_embeds", "negative_image_embeds", "image", "mask_image"]
    batch_params = [
        "prompt",
        "negative_prompt",
        "image_embeds",
        "negative_image_embeds",
        "image",
        "mask_image",
    ]
    required_optional_params = [
        "generator",
        "height",
        "width",
        "latents",
        "guidance_scale",
        "negative_prompt",
        "num_inference_steps",
        "return_dict",
        "guidance_scale",
        "num_images_per_prompt",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False
    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100

    @property
    def dummy_tokenizer(self):
        tokenizer = XLMRobertaTokenizerFast.from_pretrained("YiYiXu/tiny-random-mclip-base")
        return tokenizer

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = MCLIPConfig(
            numDims=self.cross_attention_dim,
            transformerDimensions=self.text_embedder_hidden_size,
            hidden_size=self.text_embedder_hidden_size,
            intermediate_size=37,
            num_attention_heads=4,
            num_hidden_layers=5,
            vocab_size=1005,
        )
        text_encoder = MultilingualCLIP(config)
        text_encoder = text_encoder.eval()
        return text_encoder
    @property
    def dummy_unet(self):
        torch.manual_seed(0)
        model_kwargs = {
            "in_channels": 9,
            # Out channels is double in channels because predicts mean and variance
            "out_channels": 8,
            "addition_embed_type": "text_image",
            "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
            "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
            "mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
            "block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2),
            "layers_per_block": 1,
            "encoder_hid_dim": self.text_embedder_hidden_size,
            "encoder_hid_dim_type": "text_image_proj",
            "cross_attention_dim": self.cross_attention_dim,
            "attention_head_dim": 4,
            "resnet_time_scale_shift": "scale_shift",
            "class_embed_type": None,
        }
        model = UNet2DConditionModel(**model_kwargs)
        return model
@property
    def dummy_movq_kwargs(self):
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
    def dummy_movq(self):
        torch.manual_seed(0)
        model = VQModel(**self.dummy_movq_kwargs)
        return model
    def get_dummy_components(self):
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        unet = self.dummy_unet
        movq = self.dummy_movq
        scheduler = DDIMScheduler(
            num_train_timesteps=1000,
            beta_schedule="linear",
            beta_start=0.00085,
            beta_end=0.012,
            clip_sample=False,
            set_alpha_to_one=False,
            steps_offset=1,
            prediction_type="epsilon",
            thresholding=False,
        )
        components = {
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "unet": unet,
            "scheduler": scheduler,
            "movq": movq,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        image_embeds = floats_tensor((1, self.cross_attention_dim), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.cross_attention_dim), rng=random.Random(seed + 1)).to(device)
        # create init_image
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((256, 256))
        # create mask
        mask = np.ones((64, 64), dtype=np.float32)
        mask[:32, :32] = 0

        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "horse",
            "image": init_image,
            "mask_image": mask,
            "image_embeds": image_embeds,
            "negative_image_embeds": negative_image_embeds,
            "generator": generator,
            "height": 64,
            "width": 64,
            "num_inference_steps": 2,
            "guidance_scale": 4.0,
            "output_type": "np",
        }
        return inputs
    def test_kandinsky_inpaint(self):
        device = "cpu"

        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.images
        image_from_tuple = pipe(**self.get_dummy_inputs(device), return_dict=False)[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        print(f"image.shape {image.shape}")

        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array(
            [0.8326919, 0.73790467, 0.20918581, 0.9309612, 0.5511791, 0.43713328, 0.5513321, 0.49922934, 0.59497786]
        )

        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)
@slow
@require_torch_gpu
class KandinskyInpaintPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_kandinsky_inpaint(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinsky/kandinsky_inpaint_cat_with_hat_fp16.npy"
        )
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png"
        )
        mask = np.ones((768, 768), dtype=np.float32)
        mask[:250, 250:-250] = 0

        prompt = "a hat"

        pipe_prior = KandinskyPriorPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-1-prior", torch_dtype=torch.float16
        )
        pipe_prior.to(torch_device)

        pipeline = KandinskyInpaintPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-1-inpaint", torch_dtype=torch.float16
        )
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)

        generator = torch.Generator(device="cpu").manual_seed(0)
        image_emb, zero_image_emb = pipe_prior(
            prompt, generator=generator, num_inference_steps=5, negative_prompt=""
        ).to_tuple()

        output = pipeline(
            prompt,
            image=init_image,
            mask_image=mask,
            image_embeds=image_emb,
            negative_image_embeds=zero_image_emb,
            generator=generator,
            num_inference_steps=100,
            height=768,
            width=768,
            output_type="np",
        )

        image = output.images[0]

        assert image.shape == (768, 768, 3)

        assert_mean_pixel_difference(image, expected_image)
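# A minimal sketch (assumed semantics, not diffusers' helper) of the
# `assert_mean_pixel_difference` style check used above: two images pass if
# their mean absolute pixel difference stays below a small threshold.
def _mean_pixel_difference_ok(image, expected_image, threshold=10):
    image = np.asarray(image, dtype=np.float32)
    expected_image = np.asarray(expected_image, dtype=np.float32)
    return np.abs(image - expected_image).mean() < threshold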
"""simple docstring"""
import unittest
import torch
from torch import nn
from accelerate.test_utils import require_cuda
from accelerate.utils.memory import find_executable_batch_size, release_memory
def raise_fake_out_of_memory():
    raise RuntimeError("CUDA out of memory.")
class ModelForTest(nn.Module):
    def __init__(self):
        super().__init__()
        self.linear1 = nn.Linear(3, 4)
        self.batchnorm = nn.BatchNorm1d(4)
        self.linear2 = nn.Linear(4, 5)

    def forward(self, x):
        return self.linear2(self.batchnorm(self.linear1(x)))
class MemoryTest(unittest.TestCase):
    def test_memory_implicit(self):
        batch_sizes = []

        @find_executable_batch_size(starting_batch_size=128)
        def mock_training_loop_function(batch_size):
            nonlocal batch_sizes
            batch_sizes.append(batch_size)
            if batch_size != 8:
                raise_fake_out_of_memory()

        mock_training_loop_function()
        self.assertListEqual(batch_sizes, [128, 64, 32, 16, 8])
    def test_memory_explicit(self):
        batch_sizes = []

        @find_executable_batch_size(starting_batch_size=128)
        def mock_training_loop_function(batch_size, arg1):
            nonlocal batch_sizes
            batch_sizes.append(batch_size)
            if batch_size != 8:
                raise_fake_out_of_memory()
            return batch_size, arg1

        bs, arg1 = mock_training_loop_function("hello")
        self.assertListEqual(batch_sizes, [128, 64, 32, 16, 8])
        self.assertListEqual([bs, arg1], [8, "hello"])
    def test_start_zero(self):
        @find_executable_batch_size(starting_batch_size=0)
        def mock_training_loop_function(batch_size):
            pass

        with self.assertRaises(RuntimeError) as cm:
            mock_training_loop_function()
        self.assertIn("No executable batch size found, reached zero.", cm.exception.args[0])
    def test_approach_zero(self):
        @find_executable_batch_size(starting_batch_size=16)
        def mock_training_loop_function(batch_size):
            if batch_size > 0:
                raise_fake_out_of_memory()
            pass

        with self.assertRaises(RuntimeError) as cm:
            mock_training_loop_function()
        self.assertIn("No executable batch size found, reached zero.", cm.exception.args[0])
    def test_verbose_guard(self):
        @find_executable_batch_size(starting_batch_size=128)
        def mock_training_loop_function(batch_size, arg1, arg2):
            if batch_size != 8:
                raise_fake_out_of_memory()

        with self.assertRaises(TypeError) as cm:
            mock_training_loop_function(128, "hello", "world")
        self.assertIn("Batch size was passed into `f`", cm.exception.args[0])
        self.assertIn("`f(arg1='hello', arg2='world')", cm.exception.args[0])
    def test_any_other_error(self):
        @find_executable_batch_size(starting_batch_size=16)
        def mock_training_loop_function(batch_size):
            raise ValueError("Oops, we had an error!")

        with self.assertRaises(ValueError) as cm:
            mock_training_loop_function()
        self.assertIn("Oops, we had an error!", cm.exception.args[0])
@require_cuda
    def test_release_memory(self):
        starting_memory = torch.cuda.memory_allocated()
        model = ModelForTest()
        model.cuda()
        self.assertGreater(torch.cuda.memory_allocated(), starting_memory)
        model = release_memory(model)
        self.assertEqual(torch.cuda.memory_allocated(), starting_memory)
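# A minimal sketch (not accelerate's implementation) of the halving retry
# logic that `find_executable_batch_size` provides: start large, halve on
# an out-of-memory error, and give up once the batch size reaches zero.
import functools


def _find_executable_batch_size_sketch(starting_batch_size=128):
    def decorator(function):
        @functools.wraps(function)
        def wrapper(*args, **kwargs):
            batch_size = starting_batch_size
            while batch_size > 0:
                try:
                    return function(batch_size, *args, **kwargs)
                except RuntimeError as e:
                    if "out of memory" in str(e).lower():
                        batch_size //= 2  # halve and retry
                    else:
                        raise
            raise RuntimeError("No executable batch size found, reached zero.")

        return wrapper

    return decorator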
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'configuration_roformer': ['ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'RoFormerConfig', 'RoFormerOnnxConfig'],
'tokenization_roformer': ['RoFormerTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_roformer_fast"] = ["RoFormerTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_roformer"] = [
'ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'RoFormerForCausalLM',
'RoFormerForMaskedLM',
'RoFormerForMultipleChoice',
'RoFormerForQuestionAnswering',
'RoFormerForSequenceClassification',
'RoFormerForTokenClassification',
'RoFormerLayer',
'RoFormerModel',
'RoFormerPreTrainedModel',
'load_tf_weights_in_roformer',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_roformer"] = [
'TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFRoFormerForCausalLM',
'TFRoFormerForMaskedLM',
'TFRoFormerForMultipleChoice',
'TFRoFormerForQuestionAnswering',
'TFRoFormerForSequenceClassification',
'TFRoFormerForTokenClassification',
'TFRoFormerLayer',
'TFRoFormerModel',
'TFRoFormerPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_roformer"] = [
'FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'FlaxRoFormerForMaskedLM',
'FlaxRoFormerForMultipleChoice',
'FlaxRoFormerForQuestionAnswering',
'FlaxRoFormerForSequenceClassification',
'FlaxRoFormerForTokenClassification',
'FlaxRoFormerModel',
'FlaxRoFormerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_roformer import ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, RoFormerConfig, RoFormerOnnxConfig
from .tokenization_roformer import RoFormerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_roformer_fast import RoFormerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roformer import (
ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
RoFormerForCausalLM,
RoFormerForMaskedLM,
RoFormerForMultipleChoice,
RoFormerForQuestionAnswering,
RoFormerForSequenceClassification,
RoFormerForTokenClassification,
RoFormerLayer,
RoFormerModel,
RoFormerPreTrainedModel,
load_tf_weights_in_roformer,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roformer import (
TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForMultipleChoice,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerLayer,
TFRoFormerModel,
TFRoFormerPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roformer import (
FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
FlaxRoFormerPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import torch
from diffusers import DDPMParallelScheduler
from .test_schedulers import SchedulerCommonTest
class DDPMParallelSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DDPMParallelScheduler,)

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "variance_type": "fixed_small",
            "clip_sample": True,
        }
        config.update(**kwargs)
        return config
    def test_timesteps(self):
        for timesteps in [1, 5, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1], [0.002, 0.02, 0.2, 2]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_variance_type(self):
        for variance in ["fixed_small", "fixed_large", "other"]:
            self.check_over_configs(variance_type=variance)

    def test_clip_sample(self):
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample)

    def test_thresholding(self):
        self.check_over_configs(thresholding=False)
        for threshold in [0.5, 1.0, 2.0]:
            for prediction_type in ["epsilon", "sample", "v_prediction"]:
                self.check_over_configs(
                    thresholding=True,
                    prediction_type=prediction_type,
                    sample_max_value=threshold,
                )

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "sample", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_time_indices(self):
        for t in [0, 500, 999]:
            self.check_over_forward(time_step=t)
    def test_variance(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        assert torch.sum(torch.abs(scheduler._get_variance(0) - 0.0)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(487) - 0.00979)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(999) - 0.02)) < 1e-5
    def test_batch_step_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        num_trained_timesteps = len(scheduler)

        model = self.dummy_model()
        sample1 = self.dummy_sample_deter
        sample2 = self.dummy_sample_deter + 0.1
        sample3 = self.dummy_sample_deter - 0.1

        per_sample_batch = sample1.shape[0]
        samples = torch.stack([sample1, sample2, sample3], dim=0)
        timesteps = torch.arange(num_trained_timesteps)[0:3, None].repeat(1, per_sample_batch)

        residual = model(samples.flatten(0, 1), timesteps.flatten(0, 1))
        pred_prev_sample = scheduler.batch_step_no_noise(residual, timesteps.flatten(0, 1), samples.flatten(0, 1))

        result_sum = torch.sum(torch.abs(pred_prev_sample))
        result_mean = torch.mean(torch.abs(pred_prev_sample))

        assert abs(result_sum.item() - 1153.1833) < 1e-2
        assert abs(result_mean.item() - 0.5005) < 1e-3
    def test_full_loop_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        num_trained_timesteps = len(scheduler)

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for t in reversed(range(num_trained_timesteps)):
            # 1. predict noise residual
            residual = model(sample, t)

            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample
            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 258.9606) < 1e-2
        assert abs(result_mean.item() - 0.3372) < 1e-3
    def test_full_loop_with_v_prediction(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
        scheduler = scheduler_class(**scheduler_config)
        num_trained_timesteps = len(scheduler)

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for t in reversed(range(num_trained_timesteps)):
            # 1. predict noise residual
            residual = model(sample, t)

            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample
            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 202.0296) < 1e-2
        assert abs(result_mean.item() - 0.2631) < 1e-3
    def test_custom_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        timesteps = [100, 87, 50, 1, 0]

        scheduler.set_timesteps(timesteps=timesteps)
        scheduler_timesteps = scheduler.timesteps

        for i, timestep in enumerate(scheduler_timesteps):
            if i == len(scheduler_timesteps) - 1:
                expected_prev_t = -1
            else:
                expected_prev_t = timesteps[i + 1]

            prev_t = scheduler.previous_timestep(timestep)
            prev_t = prev_t.item()
            self.assertEqual(prev_t, expected_prev_t)
    def test_custom_timesteps_increasing_order(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        timesteps = [100, 87, 50, 51, 0]

        with self.assertRaises(ValueError, msg="`custom_timesteps` must be in descending order."):
            scheduler.set_timesteps(timesteps=timesteps)
    def test_custom_timesteps_passing_both_num_inference_steps_and_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        timesteps = [100, 87, 50, 1, 0]
        num_inference_steps = len(timesteps)

        with self.assertRaises(ValueError, msg="Can only pass one of `num_inference_steps` or `custom_timesteps`."):
            scheduler.set_timesteps(num_inference_steps=num_inference_steps, timesteps=timesteps)
    def test_custom_timesteps_too_large(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        timesteps = [scheduler.config.num_train_timesteps]

        with self.assertRaises(
            ValueError,
            msg=f"`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}",
        ):
            scheduler.set_timesteps(timesteps=timesteps)
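# A numeric sketch (standard DDPM algebra, independent of the scheduler's
# internals) of the "fixed_small" variance values asserted above:
# var_t = beta_t * (1 - alpha_bar_{t-1}) / (1 - alpha_bar_t).
def _fixed_small_variance_sketch(t, num_train_timesteps=1000, beta_start=0.0001, beta_end=0.02):
    betas = torch.linspace(beta_start, beta_end, num_train_timesteps)
    alphas_cumprod = torch.cumprod(1.0 - betas, dim=0)
    alpha_prod_t = alphas_cumprod[t]
    alpha_prod_t_prev = alphas_cumprod[t - 1] if t > 0 else torch.tensor(1.0)
    return betas[t] * (1 - alpha_prod_t_prev) / (1 - alpha_prod_t)


# _fixed_small_variance_sketch(487) is approximately 0.0098, matching the 0.00979 check above.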
from itertools import product
def total_frequency_distribution(sides_number: int, dice_number: int) -> list[int]:
    max_face_number = sides_number
    max_total = max_face_number * dice_number
    totals_frequencies = [0] * (max_total + 1)

    min_face_number = 1
    face_numbers = range(min_face_number, max_face_number + 1)
    for dice_numbers in product(face_numbers, repeat=dice_number):
        total = sum(dice_numbers)
        totals_frequencies[total] += 1

    return totals_frequencies
def solution() -> float:
    peter_totals_frequencies = total_frequency_distribution(sides_number=4, dice_number=9)
    colin_totals_frequencies = total_frequency_distribution(sides_number=6, dice_number=6)

    peter_wins_count = 0
    min_peter_total = 9
    max_peter_total = 4 * 9
    min_colin_total = 6
    for peter_total in range(min_peter_total, max_peter_total + 1):
        peter_wins_count += peter_totals_frequencies[peter_total] * sum(
            colin_totals_frequencies[min_colin_total:peter_total]
        )

    total_games_number = (4**9) * (6**6)
    peter_win_probability = peter_wins_count / total_games_number

    rounded_peter_win_probability = round(peter_win_probability, ndigits=7)

    return rounded_peter_win_probability
if __name__ == "__main__":
print(F'''{solution() = }''')
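    # Sanity check of the frequency distribution on a tiny case: two
    # four-sided dice give the classic triangular counts over totals 2..8.
    print(total_frequency_distribution(sides_number=4, dice_number=2))
    # -> [0, 0, 1, 2, 3, 4, 3, 2, 1]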
from typing import List, Optional, Tuple, Union
import torch
from torch import nn
from torch.nn import CrossEntropyLoss
from ... import AutoBackbone
from ...modeling_outputs import SemanticSegmenterOutput
from ...modeling_utils import PreTrainedModel
from ...utils import add_start_docstrings, add_start_docstrings_to_model_forward, replace_return_docstrings
from ...utils.backbone_utils import BackboneMixin
from .configuration_upernet import UperNetConfig
UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST = [
"openmmlab/upernet-convnext-tiny",
# See all UperNet models at https://huggingface.co/models?filter=upernet
]
# General docstring
__lowerCAmelCase = "UperNetConfig"
class UperNetConvModule(nn.Module):
    def __init__(
        self,
        in_channels: int,
        out_channels: int,
        kernel_size: Union[int, Tuple[int, int]],
        padding: Union[int, Tuple[int, int], str] = 0,
        bias: bool = False,
        dilation: Union[int, Tuple[int, int]] = 1,
    ) -> None:
        super().__init__()
        self.conv = nn.Conv2d(
            in_channels=in_channels,
            out_channels=out_channels,
            kernel_size=kernel_size,
            padding=padding,
            bias=bias,
            dilation=dilation,
        )
        self.batch_norm = nn.BatchNorm2d(out_channels)
        self.activation = nn.ReLU()

    def forward(self, input: torch.Tensor) -> torch.Tensor:
        output = self.conv(input)
        output = self.batch_norm(output)
        output = self.activation(output)
        return output
class UperNetPyramidPoolingBlock(nn.Module):
    def __init__(self, pool_scale: int, in_channels: int, channels: int) -> None:
        super().__init__()
        self.layers = [
            nn.AdaptiveAvgPool2d(pool_scale),
            UperNetConvModule(in_channels, channels, kernel_size=1),
        ]
        for i, layer in enumerate(self.layers):
            self.add_module(str(i), layer)

    def forward(self, input: torch.Tensor) -> torch.Tensor:
        hidden_state = input
        for layer in self.layers:
            hidden_state = layer(hidden_state)
        return hidden_state
class UperNetPyramidPoolingModule(nn.Module):
    def __init__(self, pool_scales: Tuple[int, ...], in_channels: int, channels: int, align_corners: bool) -> None:
        super().__init__()
        self.pool_scales = pool_scales
        self.align_corners = align_corners
        self.in_channels = in_channels
        self.channels = channels
        self.blocks = []
        for i, pool_scale in enumerate(pool_scales):
            block = UperNetPyramidPoolingBlock(pool_scale=pool_scale, in_channels=in_channels, channels=channels)
            self.blocks.append(block)
            self.add_module(str(i), block)

    def forward(self, x: torch.Tensor) -> List[torch.Tensor]:
        ppm_outs = []
        for ppm in self.blocks:
            ppm_out = ppm(x)
            upsampled_ppm_out = nn.functional.interpolate(
                ppm_out, size=x.size()[2:], mode="bilinear", align_corners=self.align_corners
            )
            ppm_outs.append(upsampled_ppm_out)
        return ppm_outs
class UperNetHead(nn.Module):
    def __init__(self, config, in_channels):
        super().__init__()

        self.config = config
        self.pool_scales = config.pool_scales  # e.g. (1, 2, 3, 6)
        self.in_channels = in_channels
        self.channels = config.hidden_size
        self.align_corners = False
        self.classifier = nn.Conv2d(self.channels, config.num_labels, kernel_size=1)
        # PSP Module
        self.psp_modules = UperNetPyramidPoolingModule(
            self.pool_scales,
            self.in_channels[-1],
            self.channels,
            align_corners=self.align_corners,
        )
        self.bottleneck = UperNetConvModule(
            self.in_channels[-1] + len(self.pool_scales) * self.channels,
            self.channels,
            kernel_size=3,
            padding=1,
        )
        # FPN Module
        self.lateral_convs = nn.ModuleList()
        self.fpn_convs = nn.ModuleList()
        for in_channels in self.in_channels[:-1]:  # skip the top layer
            l_conv = UperNetConvModule(in_channels, self.channels, kernel_size=1)
            fpn_conv = UperNetConvModule(self.channels, self.channels, kernel_size=3, padding=1)
            self.lateral_convs.append(l_conv)
            self.fpn_convs.append(fpn_conv)
        self.fpn_bottleneck = UperNetConvModule(
            len(self.in_channels) * self.channels,
            self.channels,
            kernel_size=3,
            padding=1,
        )
    def init_weights(self):
        self.apply(self._init_weights)

    def _init_weights(self, module):
        if isinstance(module, nn.Conv2d):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()
    def psp_forward(self, inputs):
        x = inputs[-1]
        psp_outs = [x]
        psp_outs.extend(self.psp_modules(x))
        psp_outs = torch.cat(psp_outs, dim=1)
        output = self.bottleneck(psp_outs)
        return output
    def forward(self, encoder_hidden_states: torch.Tensor) -> torch.Tensor:
        # build laterals
        laterals = [lateral_conv(encoder_hidden_states[i]) for i, lateral_conv in enumerate(self.lateral_convs)]
        laterals.append(self.psp_forward(encoder_hidden_states))

        # build top-down path
        used_backbone_levels = len(laterals)
        for i in range(used_backbone_levels - 1, 0, -1):
            prev_shape = laterals[i - 1].shape[2:]
            laterals[i - 1] = laterals[i - 1] + nn.functional.interpolate(
                laterals[i], size=prev_shape, mode="bilinear", align_corners=self.align_corners
            )

        # build outputs
        fpn_outs = [self.fpn_convs[i](laterals[i]) for i in range(used_backbone_levels - 1)]
        # append psp feature
        fpn_outs.append(laterals[-1])
        for i in range(used_backbone_levels - 1, 0, -1):
            fpn_outs[i] = nn.functional.interpolate(
                fpn_outs[i], size=fpn_outs[0].shape[2:], mode="bilinear", align_corners=self.align_corners
            )
        fpn_outs = torch.cat(fpn_outs, dim=1)
        output = self.fpn_bottleneck(fpn_outs)
        output = self.classifier(output)
        return output
class UperNetFCNHead(nn.Module):
    def __init__(
        self, config, in_index: int = 2, kernel_size: int = 3, dilation: Union[int, Tuple[int, int]] = 1
    ) -> None:
        super().__init__()

        self.config = config
        self.in_channels = config.auxiliary_in_channels
        self.channels = config.auxiliary_channels
        self.num_convs = config.auxiliary_num_convs
        self.concat_input = config.auxiliary_concat_input
        self.in_index = in_index

        conv_padding = (kernel_size // 2) * dilation
        convs = []
        convs.append(
            UperNetConvModule(
                self.in_channels, self.channels, kernel_size=kernel_size, padding=conv_padding, dilation=dilation
            )
        )
        for i in range(self.num_convs - 1):
            convs.append(
                UperNetConvModule(
                    self.channels, self.channels, kernel_size=kernel_size, padding=conv_padding, dilation=dilation
                )
            )
        if self.num_convs == 0:
            self.convs = nn.Identity()
        else:
            self.convs = nn.Sequential(*convs)
        if self.concat_input:
            self.conv_cat = UperNetConvModule(
                self.in_channels + self.channels, self.channels, kernel_size=kernel_size, padding=kernel_size // 2
            )
        self.classifier = nn.Conv2d(self.channels, config.num_labels, kernel_size=1)
    def init_weights(self):
        self.apply(self._init_weights)

    def _init_weights(self, module):
        if isinstance(module, nn.Conv2d):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()
    def forward(self, encoder_hidden_states: torch.Tensor) -> torch.Tensor:
        # just take the relevant feature maps
        hidden_states = encoder_hidden_states[self.in_index]
        output = self.convs(hidden_states)
        if self.concat_input:
            output = self.conv_cat(torch.cat([hidden_states, output], dim=1))
        output = self.classifier(output)
        return output
class UperNetPreTrainedModel(PreTrainedModel):
    config_class = UperNetConfig
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = True

    def _init_weights(self, module):
        if isinstance(module, UperNetPreTrainedModel):
            module.backbone.init_weights()
            module.decode_head.init_weights()
            module.auxiliary_head.init_weights()

    def init_weights(self):
        self.backbone.init_weights()
        self.decode_head.init_weights()
        self.auxiliary_head.init_weights()

    def _set_gradient_checkpointing(self, module, value=False):
        if isinstance(module, BackboneMixin):
            module.gradient_checkpointing = value
UPERNET_START_DOCSTRING = r"\n    Parameters:\n    This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use\n    it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n    behavior.\n    config ([`UperNetConfig`]): Model configuration class with all the parameters of the model.\n        Initializing with a config file does not load the weights associated with the model, only the\n        configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n"
UPERNET_INPUTS_DOCSTRING = r"\n    Args:\n        pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n            Pixel values. Padding will be ignored by default should you provide it. Pixel values can be obtained using\n            [`AutoImageProcessor`]. See [`SegformerImageProcessor.__call__`] for details.\n        output_attentions (`bool`, *optional*):\n            Whether or not to return the attentions tensors of all attention layers in case the backbone has them. See\n            `attentions` under returned tensors for more detail.\n        output_hidden_states (`bool`, *optional*):\n            Whether or not to return the hidden states of all layers of the backbone. See `hidden_states` under\n            returned tensors for more detail.\n        return_dict (`bool`, *optional*):\n            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n"
@add_start_docstrings(
    """UperNet framework leveraging any vision backbone e.g. for ADE20k, CityScapes.""",
    UPERNET_START_DOCSTRING,
)
class UperNetForSemanticSegmentation(UperNetPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)

        self.backbone = AutoBackbone.from_config(config.backbone_config)

        # Semantic segmentation head(s)
        self.decode_head = UperNetHead(config, in_channels=self.backbone.channels)
        self.auxiliary_head = UperNetFCNHead(config) if config.use_auxiliary_head else None

        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(UPERNET_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @replace_return_docstrings(output_type=SemanticSegmenterOutput, config_class=_CONFIG_FOR_DOC)
    def forward(
        self,
        pixel_values: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        labels: Optional[torch.Tensor] = None,
        return_dict: Optional[bool] = None,
    ):
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions

        outputs = self.backbone.forward_with_filtered_kwargs(
            pixel_values, output_hidden_states=output_hidden_states, output_attentions=output_attentions
        )
        features = outputs.feature_maps

        logits = self.decode_head(features)
        logits = nn.functional.interpolate(logits, size=pixel_values.shape[2:], mode="bilinear", align_corners=False)

        auxiliary_logits = None
        if self.auxiliary_head is not None:
            auxiliary_logits = self.auxiliary_head(features)
            auxiliary_logits = nn.functional.interpolate(
                auxiliary_logits, size=pixel_values.shape[2:], mode="bilinear", align_corners=False
            )

        loss = None
        if labels is not None:
            if self.config.num_labels == 1:
                raise ValueError("The number of labels should be greater than one")
            else:
                # compute weighted loss
                loss_fct = CrossEntropyLoss(ignore_index=self.config.loss_ignore_index)
                main_loss = loss_fct(logits, labels)
                auxiliary_loss = loss_fct(auxiliary_logits, labels)
                loss = main_loss + self.config.auxiliary_loss_weight * auxiliary_loss

        if not return_dict:
            if output_hidden_states:
                output = (logits,) + outputs[1:]
            else:
                output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output

        return SemanticSegmenterOutput(
            loss=loss,
            logits=logits,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )
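# A minimal usage sketch (default config values assumed, not part of the
# original file): the forward pass above upsamples the segmentation logits
# back to the input resolution, so the output spatial size matches the input.
if __name__ == "__main__":
    from transformers import UperNetConfig

    demo_model = UperNetForSemanticSegmentation(UperNetConfig())
    demo_logits = demo_model(pixel_values=torch.randn(1, 3, 64, 64)).logits
    print(demo_logits.shape)  # (1, config.num_labels, 64, 64)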
import gc
import unittest
import numpy as np
import torch
from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNet1DModel
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS, UNCONDITIONAL_AUDIO_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class DanceDiffusionPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = DanceDiffusionPipeline
    params = UNCONDITIONAL_AUDIO_GENERATION_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {
        "callback",
        "latents",
        "callback_steps",
        "output_type",
        "num_images_per_prompt",
    }
    batch_params = UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS
    test_attention_slicing = False
    test_cpu_offload = False
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet1DModel(
            block_out_channels=(32, 32, 64),
            extra_in_channels=16,
            sample_size=512,
            sample_rate=16000,
            in_channels=2,
            out_channels=2,
            flip_sin_to_cos=True,
            use_timestep_embedding=False,
            time_embedding_type="fourier",
            mid_block_type="UNetMidBlock1D",
            down_block_types=("DownBlock1DNoSkip", "DownBlock1D", "AttnDownBlock1D"),
            up_block_types=("AttnUpBlock1D", "UpBlock1D", "UpBlock1DNoSkip"),
        )
        scheduler = IPNDMScheduler()
        components = {
            "unet": unet,
            "scheduler": scheduler,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "batch_size": 1,
            "generator": generator,
            "num_inference_steps": 4,
        }
        return inputs
    def test_dance_diffusion(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        pipe = DanceDiffusionPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        output = pipe(**inputs)
        audio = output.audios

        audio_slice = audio[0, -3:, -3:]

        assert audio.shape == (1, 2, components["unet"].sample_size)
        expected_slice = np.array([-0.7265, 1.0000, -0.8388, 0.1175, 0.9498, -1.0000])
        assert np.abs(audio_slice.flatten() - expected_slice).max() < 1e-2
    @skip_mps
    def test_save_load_local(self):
        return super().test_save_load_local()

    @skip_mps
    def test_dict_tuple_outputs_equivalent(self):
        return super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3)

    @skip_mps
    def test_save_load_optional_components(self):
        return super().test_save_load_optional_components()

    @skip_mps
    def test_attention_slicing_forward_pass(self):
        return super().test_attention_slicing_forward_pass()

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)
@slow
@require_torch_gpu
class PipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_dance_diffusion(self):
        device = torch_device

        pipe = DanceDiffusionPipeline.from_pretrained("harmonai/maestro-150k")
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        output = pipe(generator=generator, num_inference_steps=100, audio_length_in_s=4.096)
        audio = output.audios

        audio_slice = audio[0, -3:, -3:]

        assert audio.shape == (1, 2, pipe.unet.sample_size)
        expected_slice = np.array([-0.0192, -0.0231, -0.0318, -0.0059, 0.0002, -0.0020])
        assert np.abs(audio_slice.flatten() - expected_slice).max() < 1e-2

    def test_dance_diffusion_fp16(self):
        device = torch_device

        pipe = DanceDiffusionPipeline.from_pretrained("harmonai/maestro-150k", torch_dtype=torch.float16)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        output = pipe(generator=generator, num_inference_steps=100, audio_length_in_s=4.096)
        audio = output.audios

        audio_slice = audio[0, -3:, -3:]

        assert audio.shape == (1, 2, pipe.unet.sample_size)
        expected_slice = np.array([-0.0367, -0.0488, -0.0771, -0.0525, -0.0444, -0.0341])
        assert np.abs(audio_slice.flatten() - expected_slice).max() < 1e-2
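# Quick arithmetic behind `audio_length_in_s=4.096` above: at the model's
# 16 kHz sample rate this is 4.096 * 16000 = 65536 samples, a power of two
# that divides cleanly through the UNet's downsampling blocks.
assert int(4.096 * 16000) == 2**16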
'''simple docstring'''
import os
import sys
import tempfile
import torch
from .state import AcceleratorState
from .utils import PrecisionType, PrepareForLaunch, is_mps_available, patch_environment
def notebook_launcher(function, args=(), num_processes=None, mixed_precision="no", use_port="29500"):
    """
    Launches a training function, using several processes if it's possible in the current environment.
    """
    # Are we in a google colab or a Kaggle Kernel?
    in_colab = False
    in_kaggle = False
    if any(key.startswith("KAGGLE") for key in os.environ.keys()):
        in_kaggle = True
    elif "IPython" in sys.modules:
        in_colab = "google.colab" in str(sys.modules["IPython"].get_ipython())

    try:
        mixed_precision = PrecisionType(mixed_precision.lower())
    except ValueError:
        raise ValueError(
            f"Unknown mixed_precision mode: {mixed_precision}. Choose between {PrecisionType.list()}."
        )
    if (in_colab or in_kaggle) and (os.environ.get("TPU_NAME", None) is not None):
# TPU launch
import torch_xla.distributed.xla_multiprocessing as xmp
if len(AcceleratorState._shared_state ) > 0:
raise ValueError(
'''To train on TPU in Colab or Kaggle Kernel, the `Accelerator` should only be initialized inside '''
'''your training function. Restart your notebook and make sure no cells initializes an '''
'''`Accelerator`.''' )
        if num_processes is None:
            num_processes = 8

        launcher = PrepareForLaunch(function, distributed_type="TPU")
        print(f"Launching a training on {num_processes} TPU cores.")
        xmp.spawn(launcher, args=args, nprocs=num_processes, start_method="fork")
elif in_colab:
# No need for a distributed launch otherwise as it's either CPU or one GPU.
if torch.cuda.is_available():
print('''Launching training on one GPU.''' )
else:
print('''Launching training on one CPU.''' )
        function(*args)
else:
if num_processes is None:
raise ValueError(
'''You have to specify the number of GPUs you would like to use, add `num_processes=...` to your call.''' )
if num_processes > 1:
# Multi-GPU launch
from torch.multiprocessing import start_processes
from torch.multiprocessing.spawn import ProcessRaisedException
if len(AcceleratorState._shared_state ) > 0:
raise ValueError(
'''To launch a multi-GPU training from your notebook, the `Accelerator` should only be initialized '''
'''inside your training function. Restart your notebook and make sure no cells initializes an '''
'''`Accelerator`.''' )
if torch.cuda.is_initialized():
raise ValueError(
'''To launch a multi-GPU training from your notebook, you need to avoid running any instruction '''
'''using `torch.cuda` in any cell. Restart your notebook and make sure no cells use any CUDA '''
'''function.''' )
# torch.distributed will expect a few environment variable to be here. We set the ones common to each
# process here (the other ones will be set be the launcher).
            with patch_environment(
                world_size=num_processes, master_addr="127.0.01", master_port=use_port, mixed_precision=mixed_precision
            ):
                launcher = PrepareForLaunch(function, distributed_type="MULTI_GPU")
                print(f"Launching training on {num_processes} GPUs.")
                try:
                    start_processes(launcher, args=args, nprocs=num_processes, start_method="fork")
except ProcessRaisedException as e:
if "Cannot re-initialize CUDA in forked subprocess" in e.args[0]:
raise RuntimeError(
'''CUDA has been initialized before the `notebook_launcher` could create a forked subprocess. '''
'''This likely stems from an outside import causing issues once the `notebook_launcher()` is called. '''
'''Please review your imports and test them when running the `notebook_launcher()` to identify '''
'''which one is problematic.''' ) from e
else:
# No need for a distributed launch otherwise as it's either CPU, GPU or MPS.
            if is_mps_available():
                os.environ["PYTORCH_ENABLE_MPS_FALLBACK"] = "1"
                print("Launching training on MPS.")
            elif torch.cuda.is_available():
                print("Launching training on one GPU.")
            else:
                print("Launching training on CPU.")
            function(*args)
def debug_launcher(function, args=(), num_processes=2):
    """
    Launches a training function using several processes on CPU for debugging purposes.
    """
    from torch.multiprocessing import start_processes

    with tempfile.NamedTemporaryFile() as tmp_file:
        # torch.distributed will expect a few environment variable to be here. We set the ones common to each
        # process here (the other ones will be set be the launcher).
        with patch_environment(
            world_size=num_processes,
            master_addr="127.0.01",
            master_port="29500",
            accelerate_mixed_precision="no",
            accelerate_debug_rdv_file=tmp_file.name,
            accelerate_use_cpu="yes",
        ):
            launcher = PrepareForLaunch(function, debug=True)
            start_processes(launcher, args=args, nprocs=num_processes, start_method="fork")
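# A minimal usage sketch for the launcher above (the training function below
# is illustrative; the call is commented out because it needs a notebook
# session with the required hardware).
def _demo_training_function(message="hello from every process"):
    print(message)


# notebook_launcher(_demo_training_function, args=(), num_processes=2)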
from __future__ import annotations
import os
import tempfile
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import is_tensorflow_text_available, is_tf_available
from transformers.testing_utils import require_tensorflow_text, require_tf, slow
from ..test_modeling_tf_common import floats_tensor
from .test_framework_agnostic import GenerationIntegrationTestsMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
AutoTokenizer,
TFAutoModelForCausalLM,
TFAutoModelForSeqaSeqLM,
TFAutoModelForSpeechSeqaSeq,
TFAutoModelForVisionaSeq,
TFBartForConditionalGeneration,
TFLogitsProcessorList,
TFMinLengthLogitsProcessor,
tf_top_k_top_p_filtering,
)
if is_tensorflow_text_available():
import tensorflow_text as text
@require_tf
class __lowercase (unittest.TestCase ):
'''simple docstring'''
    def test_top_k_top_p_filtering(self):
        logits = tf.convert_to_tensor(
[
[
8.2_220_991, # 3rd highest value; idx. 0
-0.5_620_044,
5.23_229_752,
4.0_386_393,
-6.8_798_378,
-0.54_785_802,
-3.2_012_153,
2.92_777_176,
1.88_171_953,
7.35_341_276, # 5th highest value; idx. 9
8.43_207_833, # 2nd highest value; idx. 10
-9.85_711_836,
-5.96_209_236,
-1.13_039_161,
-7.1_115_294,
-0.8_369_633,
-5.3_186_408,
7.06_427_407,
0.81_369_344,
-0.82_023_817,
-5.9_179_796,
0.58_813_443,
-6.99_778_438,
4.71_551_189,
-0.18_771_637,
7.44_020_759, # 4th highest value; idx. 25
9.38_450_987, # 1st highest value; idx. 26
2.12_662_941,
-9.32_562_038,
2.35_652_522,
], # cummulative prob of 5 highest values <= 0.6
[
0.58_425_518,
4.53_139_238,
-5.57_510_464,
-6.28_030_699,
-7.19_529_503,
-4.02_122_551,
1.39_337_037,
-6.06_707_057,
1.59_480_517,
-9.643_119,
0.03_907_799,
0.67_231_762,
-8.88_206_726,
6.27_115_922, # 4th highest value; idx. 13
2.28_520_723,
4.82_767_506,
4.30_421_368,
8.8_275_313, # 2nd highest value; idx. 17
5.44_029_958, # 5th highest value; idx. 18
-4.4_735_794,
7.38_579_536, # 3rd highest value; idx. 20
-2.91_051_663,
2.61_946_077,
-2.5_674_762,
-9.48_959_302,
-4.02_922_645,
-1.35_416_918,
9.67_702_323, # 1st highest value; idx. 27
-5.89_478_553,
1.85_370_467,
], # cummulative prob of 5 highest values <= 0.6
            ], dtype=tf.float32)
        non_inf_expected_idx = tf.convert_to_tensor(
            [[0, 0], [0, 9], [0, 10], [0, 25], [0, 26], [1, 13], [1, 17], [1, 18], [1, 20], [1, 27]],
            dtype=tf.int32,
        )  # expected non filtered idx as noted above

        non_inf_expected_output = tf.convert_to_tensor(
            [8.222099, 7.3534126, 8.432078, 7.4402075, 9.38451, 6.271159, 8.827531, 5.4402995, 7.3857956, 9.677023],
            dtype=tf.float32,
        )  # expected non filtered values as noted above

        output = tf_top_k_top_p_filtering(logits, top_k=10, top_p=0.6, min_tokens_to_keep=4)

        non_inf_output = output[output != -float("inf")]
        non_inf_idx = tf.cast(
            tf.where(tf.not_equal(output, tf.constant(-float("inf"), dtype=tf.float32))),
            dtype=tf.int32,
        )

        tf.debugging.assert_near(non_inf_output, non_inf_expected_output, rtol=1e-12)
        tf.debugging.assert_equal(non_inf_idx, non_inf_expected_idx)
@require_tf
class TFGenerationIntegrationTests(unittest.TestCase, GenerationIntegrationTestsMixin):
'''simple docstring'''
if is_tf_available():
        framework_dependent_parameters = {
"""AutoModelForCausalLM""": TFAutoModelForCausalLM,
"""AutoModelForSpeechSeq2Seq""": TFAutoModelForSpeechSeqaSeq,
"""AutoModelForSeq2SeqLM""": TFAutoModelForSeqaSeqLM,
"""AutoModelForVision2Seq""": TFAutoModelForVisionaSeq,
"""LogitsProcessorList""": TFLogitsProcessorList,
"""MinLengthLogitsProcessor""": TFMinLengthLogitsProcessor,
"""create_tensor_fn""": tf.convert_to_tensor,
"""floats_tensor""": floats_tensor,
"""return_tensors""": """tf""",
}
@slow
    def test_generate_tf_function_export_fixed_input_length(self):
        test_model = TFAutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        input_length = 2
        max_new_tokens = 2

        class DummyModel(tf.Module):
            def __init__(self, model):
                super(DummyModel, self).__init__()
                self.model = model

            @tf.function(
                input_signature=(
                    tf.TensorSpec((None, input_length), tf.int32, name="input_ids"),
                    tf.TensorSpec((None, input_length), tf.int32, name="attention_mask"),
                ),
                jit_compile=True,
            )
            def serving(self, input_ids, attention_mask):
                outputs = self.model.generate(
                    input_ids=input_ids,
                    attention_mask=attention_mask,
                    max_new_tokens=max_new_tokens,
                    return_dict_in_generate=True,
                )
                return {"sequences": outputs["sequences"]}

        dummy_input_ids = [[2, 0], [102, 103]]
        dummy_attention_masks = [[1, 0], [1, 1]]
        dummy_model = DummyModel(model=test_model)
        with tempfile.TemporaryDirectory() as tmp_dir:
            tf.saved_model.save(dummy_model, tmp_dir, signatures={"serving_default": dummy_model.serving})
            serving_func = tf.saved_model.load(tmp_dir).signatures["serving_default"]
            for batch_size in range(1, len(dummy_input_ids) + 1):
                inputs = {
                    "input_ids": tf.constant(dummy_input_ids[:batch_size]),
                    "attention_mask": tf.constant(dummy_attention_masks[:batch_size]),
                }
                tf_func_outputs = serving_func(**inputs)["sequences"]
                tf_model_outputs = test_model.generate(**inputs, max_new_tokens=max_new_tokens)
                tf.debugging.assert_equal(tf_func_outputs, tf_model_outputs)
@slow
    def test_generate_tf_function_export_fixed_batch_size(self):
        test_model = TFAutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        batch_size = 1
        max_new_tokens = 2

        class DummyModel(tf.Module):
            def __init__(self, model):
                super(DummyModel, self).__init__()
                self.model = model

            @tf.function(
                input_signature=(
                    tf.TensorSpec((batch_size, None), tf.int32, name="input_ids"),
                    tf.TensorSpec((batch_size, None), tf.int32, name="attention_mask"),
                ),
                jit_compile=True,
            )
            def serving(self, input_ids, attention_mask):
                outputs = self.model.generate(
                    input_ids=input_ids,
                    attention_mask=attention_mask,
                    max_new_tokens=max_new_tokens,
                    return_dict_in_generate=True,
                )
                return {"sequences": outputs["sequences"]}

        dummy_input_ids = [[2], [102, 103]]
        dummy_attention_masks = [[1], [1, 1]]
        dummy_model = DummyModel(model=test_model)
        with tempfile.TemporaryDirectory() as tmp_dir:
            tf.saved_model.save(dummy_model, tmp_dir, signatures={"serving_default": dummy_model.serving})
            serving_func = tf.saved_model.load(tmp_dir).signatures["serving_default"]
            for input_row in range(len(dummy_input_ids)):
                inputs = {
                    "input_ids": tf.constant([dummy_input_ids[input_row]]),
                    "attention_mask": tf.constant([dummy_attention_masks[input_row]]),
                }
                tf_func_outputs = serving_func(**inputs)["sequences"]
                tf_model_outputs = test_model.generate(**inputs, max_new_tokens=max_new_tokens)
                tf.debugging.assert_equal(tf_func_outputs, tf_model_outputs)
@slow
@require_tensorflow_text
    def test_generate_tf_function_export_with_tf_tokenizer(self):
        with tempfile.TemporaryDirectory() as tmp_dir:
            # file needed to load the TF tokenizer
            hf_hub_download(repo_id="google/flan-t5-small", filename="spiece.model", local_dir=tmp_dir)

            class CompleteSentenceTransformer(tf.keras.layers.Layer):
                def __init__(self):
                    super().__init__()
                    self.tokenizer = text.SentencepieceTokenizer(
                        model=tf.io.gfile.GFile(os.path.join(tmp_dir, "spiece.model"), "rb").read()
                    )
                    self.model = TFAutoModelForSeq2SeqLM.from_pretrained("hf-internal-testing/tiny-random-t5")

                def call(self, inputs, *args, **kwargs):
                    tokens = self.tokenizer.tokenize(inputs)
                    input_ids, attention_mask = text.pad_model_inputs(
                        tokens, max_seq_length=64, pad_value=self.model.config.pad_token_id
                    )
                    outputs = self.model.generate(input_ids=input_ids, attention_mask=attention_mask)
                    return self.tokenizer.detokenize(outputs)

            complete_model = CompleteSentenceTransformer()
            inputs = tf.keras.layers.Input(shape=(1,), dtype=tf.string, name="inputs")
            outputs = complete_model(inputs)
            keras_model = tf.keras.Model(inputs, outputs)
            keras_model.save(tmp_dir)
    def test_eos_token_id_int_and_list_top_k_top_sampling(self):
        generation_kwargs = {
            "do_sample": True,
            "num_beams": 1,
            "top_p": 0.7,
            "top_k": 10,
            "temperature": 0.7,
        }
        expectation = 14
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        sentence = "Hello, my dog is cute and"
        tokens = tokenizer(sentence, return_tensors="tf")
        model = TFAutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2")

        eos_token_id = 638
        # forces the generation to happen on CPU, to avoid GPU-related quirks
        with tf.device(":/CPU:0"):
            tf.random.set_seed(0)
            generated_tokens = model.generate(**tokens, eos_token_id=eos_token_id, **generation_kwargs)
        self.assertTrue(expectation == len(generated_tokens[0]))

        eos_token_id = [638, 198]
        with tf.device(":/CPU:0"):
            tf.random.set_seed(0)
            generated_tokens = model.generate(**tokens, eos_token_id=eos_token_id, **generation_kwargs)
        self.assertTrue(expectation == len(generated_tokens[0]))
    def test_model_kwarg_encoder_signature_filtering(self):
        bart_tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-bart")
        article = "Hugging Face is a technology company based in New York and Paris."
        input_ids = bart_tokenizer(article, return_tensors="tf").input_ids
        bart_model = TFBartForConditionalGeneration.from_pretrained("hf-internal-testing/tiny-random-bart")
        output = bart_model.generate(input_ids).numpy()

        class FakeBart(TFBartForConditionalGeneration):
            def call(self, input_ids, foo=None, **kwargs):
                return super().call(input_ids, **kwargs)

        bart_model = FakeBart.from_pretrained("hf-internal-testing/tiny-random-bart")
        fake_output = bart_model.generate(input_ids, foo="bar").numpy()
        self.assertTrue(np.array_equal(output, fake_output))

        class FakeEncoder(bart_model.model.encoder.__class__):
            def call(self, input_ids, **kwargs):
                return super().call(input_ids, **kwargs)

        fake_encoder = FakeEncoder(bart_model.config, bart_model.model.shared)
        bart_model.model.encoder = fake_encoder

        # Normal generation still works (the output will be different because the encoder weights are different)
        output = bart_model.generate(input_ids).numpy()
        with self.assertRaises(ValueError):
            # FakeEncoder.call() accepts **kwargs -> no filtering -> value error due to unexpected input "foo"
            bart_model.generate(input_ids, foo="bar")
| 713
|
def abbr(a: str, b: str) -> bool:
    n = len(a)
    m = len(b)
    dp = [[False for _ in range(m + 1)] for _ in range(n + 1)]
    dp[0][0] = True
    for i in range(n):
        for j in range(m + 1):
            if dp[i][j]:
                if j < m and a[i].upper() == b[j]:
                    dp[i + 1][j + 1] = True
                if a[i].islower():
                    dp[i + 1][j] = True
    return dp[n][m]
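# A minimal check of the DP above (illustrative strings): `abbr` asks whether `a` can be
# turned into `b` by upper-casing some of its lower-case letters and deleting the rest.
if __name__ == "__main__":
    assert abbr("daBcd", "ABC") is True
    assert abbr("dBcd", "ABC") is False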
if __name__ == "__main__":
import doctest
doctest.testmod()
| 583
| 0
|
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import flax
import jax.numpy as jnp
from jax import random
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .scheduling_utils_flax import FlaxSchedulerMixin
@flax.struct.dataclass
class KarrasVeSchedulerState:
    # setable values
    num_inference_steps: Optional[int] = None
    timesteps: Optional[jnp.ndarray] = None
    schedule: Optional[jnp.ndarray] = None  # sigma(t_i)

    @classmethod
    def create(cls) -> "KarrasVeSchedulerState":
        return cls()
@dataclass
class FlaxKarrasVeOutput(BaseOutput):
    prev_sample: jnp.ndarray
    derivative: jnp.ndarray
    state: KarrasVeSchedulerState
class FlaxKarrasVeScheduler(FlaxSchedulerMixin, ConfigMixin):
    @property
    def has_state(self) -> bool:
        return True

    @register_to_config
    def __init__(
        self,
        sigma_min: float = 0.02,
        sigma_max: float = 100,
        s_noise: float = 1.007,
        s_churn: float = 80,
        s_min: float = 0.05,
        s_max: float = 50,
    ):
        # all hyper-parameters are stored on `self.config` by `register_to_config`
        pass

    def create_state(self):
        return KarrasVeSchedulerState.create()
    def set_timesteps(
        self, state: KarrasVeSchedulerState, num_inference_steps: int, shape: Tuple = ()
    ) -> KarrasVeSchedulerState:
        timesteps = jnp.arange(0, num_inference_steps)[::-1].copy()
        schedule = [
            (
                self.config.sigma_max**2
                * (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1))
            )
            for i in timesteps
        ]
        return state.replace(
            num_inference_steps=num_inference_steps,
            schedule=jnp.array(schedule, dtype=jnp.float32),
            timesteps=timesteps,
        )
    def add_noise_to_input(
        self, state: KarrasVeSchedulerState, sample: jnp.ndarray, sigma: float, key
    ) -> Tuple[jnp.ndarray, float]:
        if self.config.s_min <= sigma <= self.config.s_max:
            gamma = min(self.config.s_churn / state.num_inference_steps, 2**0.5 - 1)
        else:
            gamma = 0
        # sample eps ~ N(0, S_noise^2 * I)
        key = random.split(key, num=1)
        eps = self.config.s_noise * random.normal(key=key, shape=sample.shape)
        sigma_hat = sigma + gamma * sigma
        sample_hat = sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps)
        return sample_hat, sigma_hat
    def step(
        self,
        state: KarrasVeSchedulerState,
        model_output: jnp.ndarray,
        sigma_hat: float,
        sigma_prev: float,
        sample_hat: jnp.ndarray,
        return_dict: bool = True,
    ) -> Union[FlaxKarrasVeOutput, Tuple]:
        pred_original_sample = sample_hat + sigma_hat * model_output
        derivative = (sample_hat - pred_original_sample) / sigma_hat
        sample_prev = sample_hat + (sigma_prev - sigma_hat) * derivative
        if not return_dict:
            return (sample_prev, derivative, state)
        return FlaxKarrasVeOutput(prev_sample=sample_prev, derivative=derivative, state=state)
    def step_correct(
        self,
        state: KarrasVeSchedulerState,
        model_output: jnp.ndarray,
        sigma_hat: float,
        sigma_prev: float,
        sample_hat: jnp.ndarray,
        sample_prev: jnp.ndarray,
        derivative: jnp.ndarray,
        return_dict: bool = True,
    ) -> Union[FlaxKarrasVeOutput, Tuple]:
        pred_original_sample = sample_prev + sigma_prev * model_output
        derivative_corr = (sample_prev - pred_original_sample) / sigma_prev
        sample_prev = sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr)
        if not return_dict:
            return (sample_prev, derivative, state)
        return FlaxKarrasVeOutput(prev_sample=sample_prev, derivative=derivative, state=state)
    def add_noise(self, state: KarrasVeSchedulerState, original_samples, noise, timesteps):
        raise NotImplementedError()
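# Minimal usage sketch (comments only; the denoising `model` below is hypothetical):
#   scheduler = FlaxKarrasVeScheduler()
#   state = scheduler.create_state()
#   state = scheduler.set_timesteps(state, num_inference_steps=50)
#   for t in state.timesteps:
#       sigma = state.schedule[t]
#       sigma_prev = state.schedule[t - 1] if t > 0 else 0
#       sample_hat, sigma_hat = scheduler.add_noise_to_input(state, sample, sigma, key)
#       model_output = model(sample_hat, sigma_hat)
#       sample = scheduler.step(state, model_output, sigma_hat, sigma_prev, sample_hat).prev_sample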
| 590
|
from transformers import DistilBertTokenizer, DistilBertTokenizerFast
from transformers.testing_utils import require_tokenizers, slow
from ..bert.test_tokenization_bert import BertTokenizationTest
@require_tokenizers
class DistilBertTokenizationTest(BertTokenizationTest):
    tokenizer_class = DistilBertTokenizer
    rust_tokenizer_class = DistilBertTokenizerFast
    test_rust_tokenizer = True
@slow
    def test_sequence_builders(self):
        tokenizer = DistilBertTokenizer.from_pretrained("distilbert-base-uncased")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
        assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_2 + [
            tokenizer.sep_token_id
        ]
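# For reference, the asserted layout follows the BERT convention:
#   single sequence:   [CLS] text [SEP]
#   pair of sequences: [CLS] text [SEP] text_2 [SEP]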
| 590
| 1
|
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_UpperCAmelCase = logging.get_logger(__name__)
_UpperCAmelCase = {
"facebook/xmod-base": "https://huggingface.co/facebook/xmod-base/resolve/main/config.json",
"facebook/xmod-large-prenorm": "https://huggingface.co/facebook/xmod-large-prenorm/resolve/main/config.json",
"facebook/xmod-base-13-125k": "https://huggingface.co/facebook/xmod-base-13-125k/resolve/main/config.json",
"facebook/xmod-base-30-125k": "https://huggingface.co/facebook/xmod-base-30-125k/resolve/main/config.json",
"facebook/xmod-base-30-195k": "https://huggingface.co/facebook/xmod-base-30-195k/resolve/main/config.json",
"facebook/xmod-base-60-125k": "https://huggingface.co/facebook/xmod-base-60-125k/resolve/main/config.json",
"facebook/xmod-base-60-265k": "https://huggingface.co/facebook/xmod-base-60-265k/resolve/main/config.json",
"facebook/xmod-base-75-125k": "https://huggingface.co/facebook/xmod-base-75-125k/resolve/main/config.json",
"facebook/xmod-base-75-269k": "https://huggingface.co/facebook/xmod-base-75-269k/resolve/main/config.json",
}
class XmodConfig(PretrainedConfig):
    model_type = "xmod"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        pre_norm=False,
        adapter_reduction_factor=2,
        adapter_layer_norm=False,
        adapter_reuse_layer_norm=True,
        ln_before_adapter=True,
        languages=("en_XX",),
        default_language=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
        self.pre_norm = pre_norm
        self.adapter_reduction_factor = adapter_reduction_factor
        self.adapter_layer_norm = adapter_layer_norm
        self.adapter_reuse_layer_norm = adapter_reuse_layer_norm
        self.ln_before_adapter = ln_before_adapter
        self.languages = list(languages)
        self.default_language = default_language
class XmodOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
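# Illustrative: with the default task, `inputs` above resolves to
#   OrderedDict([("input_ids", {0: "batch", 1: "sequence"}),
#                ("attention_mask", {0: "batch", 1: "sequence"})])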
| 706
|
from __future__ import annotations
def ohms_law(voltage: float, current: float, resistance: float) -> dict[str, float]:
    """Apply Ohm's law: exactly one of the three arguments must be 0, and that quantity is solved for."""
if (voltage, current, resistance).count(0 ) != 1:
raise ValueError('''One and only one argument must be 0''' )
if resistance < 0:
raise ValueError('''Resistance cannot be negative''' )
if voltage == 0:
return {"voltage": float(current * resistance )}
elif current == 0:
return {"current": voltage / resistance}
elif resistance == 0:
return {"resistance": voltage / current}
else:
raise ValueError('''Exactly one argument must be 0''' )
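# Minimal examples (illustrative values):
#   ohms_law(voltage=0, current=2, resistance=3)   -> {"voltage": 6.0}
#   ohms_law(voltage=10, current=0, resistance=5)  -> {"current": 2.0}
#   ohms_law(voltage=10, current=2, resistance=0)  -> {"resistance": 5.0}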
if __name__ == "__main__":
import doctest
doctest.testmod()
| 297
| 0
|
"""simple docstring"""
from typing import List, Optional
import numpy as np
from ...processing_utils import ProcessorMixin
from ...utils import to_numpy
class MusicgenProcessor(ProcessorMixin):
    feature_extractor_class = "EncodecFeatureExtractor"
    tokenizer_class = ("T5Tokenizer", "T5TokenizerFast")

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False

    def get_decoder_prompt_ids(self, task=None, language=None, no_timestamps=True):
        return self.tokenizer.get_decoder_prompt_ids(task=task, language=language, no_timestamps=no_timestamps)
    def __call__(self, *args, **kwargs):
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)

        audio = kwargs.pop("audio", None)
        sampling_rate = kwargs.pop("sampling_rate", None)
        text = kwargs.pop("text", None)
        if len(args) > 0:
            audio = args[0]
            args = args[1:]

        if audio is None and text is None:
            raise ValueError("You need to specify either an `audio` or `text` input to process.")

        if text is not None:
            inputs = self.tokenizer(text, **kwargs)
        if audio is not None:
            audio_inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)

        if audio is None:
            return inputs
        elif text is None:
            return audio_inputs
        else:
            inputs["input_values"] = audio_inputs["input_values"]
            if "padding_mask" in audio_inputs:
                inputs["padding_mask"] = audio_inputs["padding_mask"]
            return inputs
    def batch_decode(self, *args, **kwargs):
        audio_values = kwargs.pop("audio", None)
        padding_mask = kwargs.pop("padding_mask", None)
        if len(args) > 0:
            audio_values = args[0]
            args = args[1:]
        if audio_values is not None:
            return self._decode_audio(audio_values, padding_mask=padding_mask)
        else:
            return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)
    def _decode_audio(self, audio_values, padding_mask=None) -> List[np.ndarray]:
        audio_values = to_numpy(audio_values)
        bsz, channels, seq_len = audio_values.shape

        if padding_mask is None:
            return list(audio_values)

        padding_mask = to_numpy(padding_mask)
        # match the sequence length of the padding mask to the generated audio arrays by padding with the **non-padding**
        # token (so that the generated audio values are **not** treated as padded tokens)
        difference = seq_len - padding_mask.shape[-1]
        padding_value = 1 - self.feature_extractor.padding_value
        padding_mask = np.pad(padding_mask, ((0, 0), (0, difference)), "constant", constant_values=padding_value)

        audio_values = audio_values.tolist()
        for i in range(bsz):
            sliced_audio = np.asarray(audio_values[i])[
                padding_mask[i][None, :] != self.feature_extractor.padding_value
            ]
            audio_values[i] = sliced_audio.reshape(channels, -1)
        return audio_values
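# Minimal usage sketch (checkpoint name shown for illustration; `generated_values` is a
# hypothetical (batch, channels, seq_len) tensor of generated audio):
#   processor = MusicgenProcessor.from_pretrained("facebook/musicgen-small")
#   inputs = processor(text=["80s pop track with drums"], padding=True, return_tensors="pt")
#   audio = processor.batch_decode(audio=generated_values, padding_mask=inputs.get("padding_mask"))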
| 353
|
"""simple docstring"""
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
from ...utils import logging
from ..auto import CONFIG_MAPPING
A__ : List[Any] = logging.get_logger(__name__)
A__ : Tuple = {
'Salesforce/instruct-blip-flan-t5': 'https://huggingface.co/Salesforce/instruct-blip-flan-t5/resolve/main/config.json',
}
class InstructBlipVisionConfig(PretrainedConfig):
    model_type = "instructblip_vision_model"

    def __init__(
        self,
        hidden_size=1408,
        intermediate_size=6144,
        num_hidden_layers=39,
        num_attention_heads=16,
        image_size=224,
        patch_size=14,
        hidden_act="gelu",
        layer_norm_eps=1e-6,
        attention_dropout=0.0,
        initializer_range=1e-10,
        qkv_bias=True,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act
        self.qkv_bias = qkv_bias
@classmethod
def lowercase_ ( cls , A_ , **A_ ) -> "PretrainedConfig":
"""simple docstring"""
cls._set_token_in_kwargs(A_ )
_lowercase , _lowercase: str = cls.get_config_dict(A_ , **A_ )
# get the vision config dict if we are loading from InstructBlipConfig
if config_dict.get('''model_type''' ) == "instructblip":
_lowercase: Dict = config_dict['''vision_config''']
if "model_type" in config_dict and hasattr(cls , '''model_type''' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type '''
f'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
return cls.from_dict(A_ , **A_ )
class InstructBlipQFormerConfig(PretrainedConfig):
    model_type = "instructblip_qformer"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        cross_attention_frequency=2,
        encoder_hidden_size=1408,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.cross_attention_frequency = cross_attention_frequency
        self.encoder_hidden_size = encoder_hidden_size
@classmethod
def lowercase_ ( cls , A_ , **A_ ) -> "PretrainedConfig":
"""simple docstring"""
cls._set_token_in_kwargs(A_ )
_lowercase , _lowercase: Tuple = cls.get_config_dict(A_ , **A_ )
# get the qformer config dict if we are loading from InstructBlipConfig
if config_dict.get('''model_type''' ) == "instructblip":
_lowercase: Tuple = config_dict['''qformer_config''']
if "model_type" in config_dict and hasattr(cls , '''model_type''' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type '''
f'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
return cls.from_dict(A_ , **A_ )
class InstructBlipConfig(PretrainedConfig):
    model_type = "instructblip"
    is_composition = True

    def __init__(self, vision_config=None, qformer_config=None, text_config=None, num_query_tokens=32, **kwargs):
        super().__init__(**kwargs)
        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. initializing the InstructBlipVisionConfig with default values.")
        if qformer_config is None:
            qformer_config = {}
            logger.info("qformer_config is None. Initializing the InstructBlipQFormerConfig with default values.")
        if text_config is None:
            text_config = {}
            logger.info("text_config is None. Initializing the text config with default values (`OPTConfig`).")
        self.vision_config = InstructBlipVisionConfig(**vision_config)
        self.qformer_config = InstructBlipQFormerConfig(**qformer_config)
        text_model_type = text_config["model_type"] if "model_type" in text_config else "opt"
        self.text_config = CONFIG_MAPPING[text_model_type](**text_config)

        self.tie_word_embeddings = self.text_config.tie_word_embeddings
        self.is_encoder_decoder = self.text_config.is_encoder_decoder
        self.num_query_tokens = num_query_tokens
        self.qformer_config.encoder_hidden_size = self.vision_config.hidden_size
        self.use_decoder_only_language_model = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
        self.initializer_factor = 1.0
        self.initializer_range = 0.02
@classmethod
    def from_vision_qformer_text_configs(
        cls,
        vision_config: InstructBlipVisionConfig,
        qformer_config: InstructBlipQFormerConfig,
        text_config: PretrainedConfig,
        **kwargs,
    ):
        return cls(
            vision_config=vision_config.to_dict(),
            qformer_config=qformer_config.to_dict(),
            text_config=text_config.to_dict(),
            **kwargs,
        )
    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["vision_config"] = self.vision_config.to_dict()
        output["qformer_config"] = self.qformer_config.to_dict()
        output["text_config"] = self.text_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
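# Minimal composition sketch (default sub-configs; the OPT text config is the documented default):
#   vision = InstructBlipVisionConfig()
#   qformer = InstructBlipQFormerConfig()
#   text = CONFIG_MAPPING["opt"]()
#   config = InstructBlipConfig.from_vision_qformer_text_configs(vision, qformer, text)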
| 353
| 1
|
'''simple docstring'''
from __future__ import annotations
from collections.abc import Callable
def trapezoidal_area(fnc: Callable[[float], float], x_start: float, x_end: float, steps: int = 100) -> float:
    x1 = x_start
    fx1 = fnc(x_start)
    area = 0.0
    for _ in range(steps):
        # Approximates small segments of curve as linear and solve
        # for trapezoidal area
        x2 = (x_end - x_start) / steps + x1
        fx2 = fnc(x2)
        area += abs(fx2 + fx1) * (x2 - x1) / 2
        # Increment step
        x1 = x2
        fx1 = fx2
    return area
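# Minimal example (illustrative): for f(x) = x^2 on [0, 1] the result approaches 1/3.
#   trapezoidal_area(lambda x: x * x, 0, 1, 1_000)  # ~0.33333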
if __name__ == "__main__":
def lowerCamelCase__ ( a ):
return x**3 + x**2
print("""f(x) = x^3 + x^2""")
print("""The area between the curve, x = -5, x = 5 and the x axis is:""")
_lowercase = 10
while i <= 10_00_00:
print(f'''with {i} steps: {trapezoidal_area(f, -5, 5, i)}''')
i *= 10
| 427
|
'''simple docstring'''
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"""configuration_mgp_str""": ["""MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP""", """MgpstrConfig"""],
"""processing_mgp_str""": ["""MgpstrProcessor"""],
"""tokenization_mgp_str""": ["""MgpstrTokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mgp_str"] = [
"""MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""MgpstrModel""",
"""MgpstrPreTrainedModel""",
"""MgpstrForSceneTextRecognition""",
]
if TYPE_CHECKING:
from .configuration_mgp_str import MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP, MgpstrConfig
from .processing_mgp_str import MgpstrProcessor
from .tokenization_mgp_str import MgpstrTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mgp_str import (
MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST,
MgpstrForSceneTextRecognition,
MgpstrModel,
MgpstrPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
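# Note: with `_LazyModule`, names such as `MgpstrProcessor` are only imported on first
# attribute access, so importing the package stays cheap at startup.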
| 427
| 1
|
'''simple docstring'''
import os
import random
import sys
from . import cryptomath_module as cryptomath
from . import rabin_miller
lowercase__ : Dict = 3
def primitive_root(p_val: int) -> int:
    print("Generating primitive root of p")
    while True:
        g = random.randrange(3, p_val)
        if pow(g, 2, p_val) == 1:
            continue
        if pow(g, p_val, p_val) == 1:
            continue
        return g
def generate_key(key_size: int) -> tuple[tuple[int, int, int, int], tuple[int, int]]:
    print("Generating prime p...")
    p = rabin_miller.generate_large_prime(key_size)  # select large prime number.
    e_1 = primitive_root(p)  # one primitive root on modulo p.
    d = random.randrange(3, p)  # private_key -> have to be greater than 2 for safety.
    e_2 = cryptomath.find_mod_inverse(pow(e_1, d, p), p)

    public_key = (key_size, e_1, e_2, p)
    private_key = (key_size, d)
    return public_key, private_key
def make_key_files(name: str, key_size: int) -> None:
    if os.path.exists(f"{name}_pubkey.txt") or os.path.exists(f"{name}_privkey.txt"):
        print("\nWARNING:")
        print(
            f'"{name}_pubkey.txt" or "{name}_privkey.txt" already exists. \n'
            "Use a different name or delete these files and re-run this program."
        )
        sys.exit()
    public_key, private_key = generate_key(key_size)
    print(f"\nWriting public key to file {name}_pubkey.txt...")
    with open(f"{name}_pubkey.txt", "w") as fo:
        fo.write(f"{public_key[0]},{public_key[1]},{public_key[2]},{public_key[3]}")
    print(f"Writing private key to file {name}_privkey.txt...")
    with open(f"{name}_privkey.txt", "w") as fo:
        fo.write(f"{private_key[0]},{private_key[1]}")
def main() -> None:
    print("Making key files...")
    make_key_files("elgamal", 2048)
    print("Key files generation successful")
if __name__ == "__main__":
main()
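# Running this module writes the tuple (key_size, e_1, e_2, p) to "elgamal_pubkey.txt" and
# (key_size, d) to "elgamal_privkey.txt"; it aborts if either file already exists.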
| 98
|
import unittest
from datasets import load_dataset
from transformers.pipelines import pipeline
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_torch, slow
@is_pipeline_test
@require_torch
class ZeroShotAudioClassificationPipelineTests(unittest.TestCase):
@require_torch
    def test_small_model_pt(self):
        audio_classifier = pipeline(
            task="zero-shot-audio-classification", model="hf-internal-testing/tiny-clap-htsat-unfused"
        )
        dataset = load_dataset("ashraq/esc50")
        audio = dataset["train"]["audio"][-1]["array"]
        output = audio_classifier(audio, candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"])
        self.assertEqual(
            nested_simplify(output),
            [{"score": 0.501, "label": "Sound of a dog"}, {"score": 0.499, "label": "Sound of vaccum cleaner"}],
        )

    @unittest.skip("No models are available in TF")
    def test_small_model_tf(self):
        pass
@slow
@require_torch
    def test_large_model_pt(self):
        audio_classifier = pipeline(
            task="zero-shot-audio-classification",
            model="laion/clap-htsat-unfused",
        )
        # This is an audio of a dog
        dataset = load_dataset("ashraq/esc50")
        audio = dataset["train"]["audio"][-1]["array"]
        output = audio_classifier(audio, candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"])
        self.assertEqual(
            nested_simplify(output),
            [
                {"score": 0.999, "label": "Sound of a dog"},
                {"score": 0.001, "label": "Sound of vaccum cleaner"},
            ],
        )

        output = audio_classifier([audio] * 5, candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"])
        self.assertEqual(
            nested_simplify(output),
            [
                [
                    {"score": 0.999, "label": "Sound of a dog"},
                    {"score": 0.001, "label": "Sound of vaccum cleaner"},
                ],
            ]
            * 5,
        )

        output = audio_classifier(
            [audio] * 5, candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"], batch_size=5
        )
        self.assertEqual(
            nested_simplify(output),
            [
                [
                    {"score": 0.999, "label": "Sound of a dog"},
                    {"score": 0.001, "label": "Sound of vaccum cleaner"},
                ],
            ]
            * 5,
        )

    @unittest.skip("No models are available in TF")
    def test_large_model_tf(self):
        pass
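# Minimal usage sketch (mirrors the tiny-checkpoint test above; `audio` is a 1-D float array):
#   classifier = pipeline(
#       task="zero-shot-audio-classification", model="hf-internal-testing/tiny-clap-htsat-unfused"
#   )
#   classifier(audio, candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"])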
| 319
| 0
|
print((lambda quine: quine % quine)("""print((lambda quine: quine %% quine)(%r))"""))
| 25
|
import argparse
import fairseq
import torch
from transformers import UniSpeechSatConfig, UniSpeechSatForCTC, UniSpeechSatForPreTraining, logging
logging.set_verbosity_info()
__lowerCamelCase : int = logging.get_logger(__name__)
__lowerCamelCase : int = {
"""post_extract_proj""": """feature_projection.projection""",
"""encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""",
"""self_attn.k_proj""": """encoder.layers.*.attention.k_proj""",
"""self_attn.v_proj""": """encoder.layers.*.attention.v_proj""",
"""self_attn.q_proj""": """encoder.layers.*.attention.q_proj""",
"""self_attn.out_proj""": """encoder.layers.*.attention.out_proj""",
"""self_attn_layer_norm""": """encoder.layers.*.layer_norm""",
"""fc1""": """encoder.layers.*.feed_forward.intermediate_dense""",
"""fc2""": """encoder.layers.*.feed_forward.output_dense""",
"""final_layer_norm""": """encoder.layers.*.final_layer_norm""",
"""encoder.layer_norm""": """encoder.layer_norm""",
"""encoder.layer_norm_for_extract""": """layer_norm_for_extract""",
"""w2v_model.layer_norm""": """feature_projection.layer_norm""",
"""quantizer.weight_proj""": """quantizer.weight_proj""",
"""quantizer.vars""": """quantizer.codevectors""",
"""project_q""": """project_q""",
"""final_proj""": """project_hid""",
"""w2v_encoder.proj""": """lm_head""",
"""label_embs_concat""": """label_embeddings_concat""",
"""mask_emb""": """masked_spec_embed""",
"""spk_proj""": """speaker_proj""",
}
__lowerCamelCase : Tuple = [
"""lm_head""",
"""quantizer.weight_proj""",
"""quantizer.codevectors""",
"""project_q""",
"""project_hid""",
"""label_embeddings_concat""",
"""speaker_proj""",
"""layer_norm_for_extract""",
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)
    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape
    if hf_shape != value.shape:
        raise ValueError(
            f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
            f" {value.shape} for {full_name}"
        )
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value
    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")
def recursively_load_weights(fairseq_model, hf_model):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.unispeech_sat.feature_extractor
    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = "unispeech_sat." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    if "layer_norm_for_extract" in name and (".".join(name.split(".")[:-1]) != key):
                        # special case since naming is very similar
                        continue
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = "weight"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)
    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])
    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor[layer_id].layer_norm.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_unispeech_sat_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True
):
    if config_path is not None:
        config = UniSpeechSatConfig.from_pretrained(config_path)
    else:
        config = UniSpeechSatConfig()
    dict_path = ""
    if is_finetuned:
        hf_wav2vec = UniSpeechSatForCTC(config)
    else:
        hf_wav2vec = UniSpeechSatForPreTraining(config)
    model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
        [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])}
    )
    model = model[0].eval()
    recursively_load_weights(model, hf_wav2vec)
    hf_wav2vec.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
__lowerCamelCase : int = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument(
"""--not_finetuned""", action="""store_true""", help="""Whether the model to convert is a fine-tuned model or not"""
)
__lowerCamelCase : List[Any] = parser.parse_args()
convert_unispeech_sat_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
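# Example invocation (script name and paths are illustrative placeholders):
#   python convert_unispeech_sat_checkpoint.py \
#       --checkpoint_path /path/to/fairseq_checkpoint.pt \
#       --pytorch_dump_folder_path /path/to/output_dir \
#       --config_path /path/to/config.json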
| 25
| 1
|
'''simple docstring'''
from __future__ import annotations
import math
def is_prime(number: int) -> bool:
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False
    # All primes number are in format of 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
def list_truncated_nums(n: int) -> list[int]:
    str_num = str(n)
    list_nums = [n]
    for i in range(1, len(str_num)):
        list_nums.append(int(str_num[i:]))
        list_nums.append(int(str_num[:-i]))
    return list_nums
def validate(n: int) -> bool:
    if len(str(n)) > 3:
        if not is_prime(int(str(n)[-3:])) or not is_prime(int(str(n)[:3])):
            return False
    return True
def compute_truncated_primes(count: int = 11) -> list[int]:
    list_truncated_primes: list[int] = []
    num = 13
    while len(list_truncated_primes) != count:
        if validate(num):
            list_nums = list_truncated_nums(num)
            if all(is_prime(i) for i in list_nums):
                list_truncated_primes.append(num)
        num += 2
    return list_truncated_primes
def solution() -> int:
    return sum(compute_truncated_primes(11))
if __name__ == "__main__":
print(F"""{sum(compute_truncated_primes(1_1)) = }""")
| 24
|
import random
import unittest
import numpy as np
import transformers
from transformers import is_flax_available, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_flax
if is_flax_available():
import os
import jax.numpy as jnp
from jax import jit
from transformers import AutoTokenizer, FlaxAutoModelForCausalLM
from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model
    os.environ["XLA_PYTHON_CLIENT_MEM_FRACTION"] = "0.12"  # assumed parallelism: 8
if is_torch_available():
import torch
def ids_tensor(shape, vocab_size, rng=None):
    if rng is None:
        rng = random.Random()
    total_dims = 1
    for dim in shape:
        total_dims *= dim
    values = []
    for _ in range(total_dims):
        values.append(rng.randint(0, vocab_size - 1))
    output = np.array(values, dtype=jnp.int32).reshape(shape)
    return output
def random_attention_mask(shape, rng=None):
    attn_mask = ids_tensor(shape, vocab_size=2, rng=rng)
    # make sure that at least one token is attended to for each batch
    attn_mask[:, -1] = 1
    return attn_mask
@require_flax
class FlaxGenerationTesterMixin:
    model_tester = None
    all_generative_model_classes = ()
    def _get_input_ids_and_config(self):
        config, inputs = self.model_tester.prepare_config_and_inputs_for_common()
        # cut to half length & take max batch_size 3
        max_batch_size = 2
        sequence_length = inputs["input_ids"].shape[-1] // 2
        input_ids = inputs["input_ids"][:max_batch_size, :sequence_length]
        attention_mask = jnp.ones_like(input_ids)
        attention_mask = attention_mask[:max_batch_size, :sequence_length]
        # generate max 5 tokens
        max_length = input_ids.shape[-1] + 5
        if config.eos_token_id is not None and config.pad_token_id is None:
            # hack to allow generate for models such as GPT2 as is done in `generate()`
            config.pad_token_id = config.eos_token_id
        return config, input_ids, attention_mask, max_length
    @is_pt_flax_cross_test
    def test_greedy_generate_pt_flax(self):
        config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()
        config.do_sample = False
        config.max_length = max_length
        config.decoder_start_token_id = 0
        for model_class in self.all_generative_model_classes:
            flax_model = model_class(config)
            pt_model_class_name = model_class.__name__[4:]  # Skip the "Flax" at the beginning
            pt_model_class = getattr(transformers, pt_model_class_name)
            pt_model = pt_model_class(config).eval()
            pt_model = load_flax_weights_in_pytorch_model(pt_model, flax_model.params)
            flax_generation_outputs = flax_model.generate(input_ids).sequences
            pt_generation_outputs = pt_model.generate(torch.tensor(input_ids, dtype=torch.long))
            if flax_generation_outputs.shape[-1] > pt_generation_outputs.shape[-1]:
                flax_generation_outputs = flax_generation_outputs[:, : pt_generation_outputs.shape[-1]]
            self.assertListEqual(pt_generation_outputs.numpy().tolist(), flax_generation_outputs.tolist())
def UpperCamelCase_ ( self : Tuple ):
"""simple docstring"""
__snake_case , __snake_case , __snake_case , __snake_case = self._get_input_ids_and_config()
__snake_case = False
__snake_case = max_length
for model_class in self.all_generative_model_classes:
__snake_case = model_class(_lowerCAmelCase )
__snake_case = model.generate(_lowerCAmelCase ).sequences
self.assertEqual(generation_outputs.shape[-1] ,_lowerCAmelCase )
__snake_case = jit(model.generate )
__snake_case = jit_generate(_lowerCAmelCase ).sequences
self.assertListEqual(generation_outputs.tolist() ,jit_generation_outputs.tolist() )
def UpperCamelCase_ ( self : Union[str, Any] ):
"""simple docstring"""
__snake_case , __snake_case , __snake_case , __snake_case = self._get_input_ids_and_config()
__snake_case = True
__snake_case = max_length
for model_class in self.all_generative_model_classes:
__snake_case = model_class(_lowerCAmelCase )
__snake_case = model.generate(_lowerCAmelCase ).sequences
self.assertEqual(generation_outputs.shape[-1] ,_lowerCAmelCase )
__snake_case = jit(model.generate )
__snake_case = jit_generate(_lowerCAmelCase ).sequences
self.assertListEqual(generation_outputs.tolist() ,jit_generation_outputs.tolist() )
def UpperCamelCase_ ( self : Dict ):
"""simple docstring"""
__snake_case , __snake_case , __snake_case , __snake_case = self._get_input_ids_and_config()
__snake_case = False
__snake_case = max_length
__snake_case = 2
for model_class in self.all_generative_model_classes:
__snake_case = model_class(_lowerCAmelCase )
__snake_case = model.generate(_lowerCAmelCase ).sequences
self.assertEqual(generation_outputs.shape[-1] ,_lowerCAmelCase )
__snake_case = jit(model.generate )
__snake_case = jit_generate(_lowerCAmelCase ).sequences
self.assertListEqual(generation_outputs.tolist() ,jit_generation_outputs.tolist() )
def UpperCamelCase_ ( self : Union[str, Any] ):
"""simple docstring"""
__snake_case , __snake_case , __snake_case , __snake_case = self._get_input_ids_and_config()
__snake_case = False
__snake_case = max_length
__snake_case = 2
__snake_case = 2
for model_class in self.all_generative_model_classes:
__snake_case = model_class(_lowerCAmelCase )
__snake_case = model.generate(_lowerCAmelCase ).sequences
self.assertEqual(generation_outputs.shape[0] ,input_ids.shape[0] * config.num_return_sequences )
def UpperCamelCase_ ( self : Tuple ):
"""simple docstring"""
__snake_case , __snake_case , __snake_case , __snake_case = self._get_input_ids_and_config()
__snake_case = True
__snake_case = max_length
__snake_case = 0.8
__snake_case = 10
__snake_case = 0.3
__snake_case = 1
__snake_case = 8
__snake_case = 9
for model_class in self.all_generative_model_classes:
__snake_case = model_class(_lowerCAmelCase )
__snake_case = model.generate(_lowerCAmelCase ).sequences
self.assertEqual(generation_outputs.shape[-1] ,_lowerCAmelCase )
__snake_case = jit(model.generate )
__snake_case = jit_generate(_lowerCAmelCase ).sequences
self.assertListEqual(generation_outputs.tolist() ,jit_generation_outputs.tolist() )
def UpperCamelCase_ ( self : int ):
"""simple docstring"""
__snake_case , __snake_case , __snake_case , __snake_case = self._get_input_ids_and_config()
__snake_case = max_length
__snake_case = 1
__snake_case = 8
__snake_case = 9
for model_class in self.all_generative_model_classes:
__snake_case = model_class(_lowerCAmelCase )
__snake_case = model.generate(_lowerCAmelCase ).sequences
self.assertEqual(generation_outputs.shape[-1] ,_lowerCAmelCase )
__snake_case = jit(model.generate )
__snake_case = jit_generate(_lowerCAmelCase ).sequences
self.assertListEqual(generation_outputs.tolist() ,jit_generation_outputs.tolist() )
def UpperCamelCase_ ( self : Optional[int] ):
"""simple docstring"""
__snake_case , __snake_case , __snake_case , __snake_case = self._get_input_ids_and_config()
__snake_case = max_length
__snake_case = 2
__snake_case = 1
__snake_case = 8
__snake_case = 9
for model_class in self.all_generative_model_classes:
__snake_case = model_class(_lowerCAmelCase )
__snake_case = model.generate(_lowerCAmelCase ).sequences
self.assertEqual(generation_outputs.shape[-1] ,_lowerCAmelCase )
__snake_case = jit(model.generate )
__snake_case = jit_generate(_lowerCAmelCase ).sequences
self.assertListEqual(generation_outputs.tolist() ,jit_generation_outputs.tolist() )
def UpperCamelCase_ ( self : List[Any] ):
"""simple docstring"""
__snake_case , __snake_case , __snake_case , __snake_case = self._get_input_ids_and_config()
# pad attention mask on the left
__snake_case = attention_mask.at[(0, 0)].set(0 )
__snake_case = False
__snake_case = max_length
for model_class in self.all_generative_model_classes:
__snake_case = model_class(_lowerCAmelCase )
__snake_case = model.generate(_lowerCAmelCase ,attention_mask=_lowerCAmelCase ).sequences
self.assertEqual(generation_outputs.shape[-1] ,_lowerCAmelCase )
__snake_case = jit(model.generate )
__snake_case = jit_generate(_lowerCAmelCase ,attention_mask=_lowerCAmelCase ).sequences
self.assertListEqual(generation_outputs.tolist() ,jit_generation_outputs.tolist() )
def UpperCamelCase_ ( self : int ):
"""simple docstring"""
__snake_case , __snake_case , __snake_case , __snake_case = self._get_input_ids_and_config()
# pad attention mask on the left
__snake_case = attention_mask.at[(0, 0)].set(0 )
__snake_case = True
__snake_case = max_length
for model_class in self.all_generative_model_classes:
__snake_case = model_class(_lowerCAmelCase )
__snake_case = model.generate(_lowerCAmelCase ,attention_mask=_lowerCAmelCase ).sequences
self.assertEqual(generation_outputs.shape[-1] ,_lowerCAmelCase )
__snake_case = jit(model.generate )
__snake_case = jit_generate(_lowerCAmelCase ,attention_mask=_lowerCAmelCase ).sequences
self.assertListEqual(generation_outputs.tolist() ,jit_generation_outputs.tolist() )
def UpperCamelCase_ ( self : Any ):
"""simple docstring"""
__snake_case , __snake_case , __snake_case , __snake_case = self._get_input_ids_and_config()
# pad attention mask on the left
__snake_case = attention_mask.at[(0, 0)].set(0 )
__snake_case = 2
__snake_case = max_length
for model_class in self.all_generative_model_classes:
__snake_case = model_class(_lowerCAmelCase )
__snake_case = model.generate(_lowerCAmelCase ,attention_mask=_lowerCAmelCase ).sequences
self.assertEqual(generation_outputs.shape[-1] ,_lowerCAmelCase )
__snake_case = jit(model.generate )
__snake_case = jit_generate(_lowerCAmelCase ,attention_mask=_lowerCAmelCase ).sequences
self.assertListEqual(generation_outputs.tolist() ,jit_generation_outputs.tolist() )
@require_flax
class FlaxGenerationIntegrationTests(unittest.TestCase):
    def test_validate_generation_inputs(self):
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-bert")
        model = FlaxAutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-bert-flax-only")
        encoder_input_str = "Hello world"
        input_ids = tokenizer(encoder_input_str, return_tensors="np").input_ids
        # typos are quickly detected (the correct argument is `do_sample`)
        with self.assertRaisesRegex(ValueError, "do_samples"):
            model.generate(input_ids, do_samples=True)
        # arbitrary arguments that will not be used anywhere are also not accepted
        with self.assertRaisesRegex(ValueError, "foo"):
            fake_model_kwargs = {"foo": "bar"}
            model.generate(input_ids, **fake_model_kwargs)
| 524
| 0
|
'''simple docstring'''
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A = logging.get_logger(__name__)
A = {
'BAAI/AltCLIP': 'https://huggingface.co/BAAI/AltCLIP/resolve/main/config.json',
# See all AltCLIP models at https://huggingface.co/models?filter=altclip
}
class AltCLIPTextConfig(PretrainedConfig):
    model_type = "altclip_text_model"

    def __init__(
        self,
        vocab_size=250002,
        hidden_size=1024,
        num_hidden_layers=24,
        num_attention_heads=16,
        intermediate_size=4096,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=514,
        type_vocab_size=1,
        initializer_range=0.02,
        initializer_factor=0.02,
        layer_norm_eps=1e-05,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        project_dim=768,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.project_dim = project_dim
class AltCLIPVisionConfig(PretrainedConfig):
    model_type = "altclip_vision_model"

    def __init__(
        self,
        hidden_size=768,
        intermediate_size=3072,
        projection_dim=512,
        num_hidden_layers=12,
        num_attention_heads=12,
        num_channels=3,
        image_size=224,
        patch_size=32,
        hidden_act="quick_gelu",
        layer_norm_eps=1e-5,
        attention_dropout=0.0,
        initializer_range=0.02,
        initializer_factor=1.0,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.projection_dim = projection_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act
@classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        # get the vision config dict if we are loading from AltCLIPConfig
        if config_dict.get("model_type") == "altclip":
            config_dict = config_dict["vision_config"]
        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )
        return cls.from_dict(config_dict, **kwargs)
class AltCLIPConfig(PretrainedConfig):
    model_type = "altclip"
    is_composition = True

    def __init__(self, text_config=None, vision_config=None, projection_dim=768, logit_scale_init_value=2.6592, **kwargs):
        text_config_dict = kwargs.pop("text_config_dict", None)
        vision_config_dict = kwargs.pop("vision_config_dict", None)
        super().__init__(**kwargs)
# Instead of simply assigning `[text|vision]_config_dict` to `[text|vision]_config`, we use the values in
# `[text|vision]_config_dict` to update the values in `[text|vision]_config`. The values should be same in most
# cases, but we don't want to break anything regarding `_config_dict` that existed before commit `8827e1b2`.
if text_config_dict is not None:
if text_config is None:
lowerCamelCase : int = {}
# This is the complete result when using `text_config_dict`.
lowerCamelCase : str = AltCLIPTextConfig(**_UpperCAmelCase ).to_dict()
# Give a warning if the values exist in both `_text_config_dict` and `text_config` but being different.
for key, value in _text_config_dict.items():
if key in text_config and value != text_config[key] and key not in ["transformers_version"]:
# If specified in `text_config_dict`
if key in text_config_dict:
lowerCamelCase : Optional[Any] = (
F'''`{key}` is found in both `text_config_dict` and `text_config` but with different values. '''
F'''The value `text_config_dict[\"{key}\"]` will be used instead.'''
)
# If inferred from default argument values (just to be super careful)
else:
lowerCamelCase : int = (
F'''`text_config_dict` is provided which will be used to initialize `AltCLIPTextConfig`. The '''
F'''value `text_config[\"{key}\"]` will be overriden.'''
)
logger.warning(_UpperCAmelCase )
# Update all values in `text_config` with the ones in `_text_config_dict`.
text_config.update(_text_config_dict )
if vision_config_dict is not None:
if vision_config is None:
lowerCamelCase : Tuple = {}
# This is the complete result when using `vision_config_dict`.
lowerCamelCase : Optional[Any] = AltCLIPVisionConfig(**_UpperCAmelCase ).to_dict()
# convert keys to string instead of integer
if "id2label" in _vision_config_dict:
lowerCamelCase : Optional[int] = {
str(_UpperCAmelCase ): value for key, value in _vision_config_dict['''id2label'''].items()
}
# Give a warning if the values exist in both `_vision_config_dict` and `vision_config` but being different.
for key, value in _vision_config_dict.items():
if key in vision_config and value != vision_config[key] and key not in ["transformers_version"]:
# If specified in `vision_config_dict`
if key in vision_config_dict:
lowerCamelCase : str = (
F'''`{key}` is found in both `vision_config_dict` and `vision_config` but with different '''
F'''values. The value `vision_config_dict[\"{key}\"]` will be used instead.'''
)
# If inferred from default argument values (just to be super careful)
else:
lowerCamelCase : Optional[Any] = (
F'''`vision_config_dict` is provided which will be used to initialize `AltCLIPVisionConfig`. '''
F'''The value `vision_config[\"{key}\"]` will be overriden.'''
)
logger.warning(_UpperCAmelCase )
# Update all values in `vision_config` with the ones in `_vision_config_dict`.
vision_config.update(_vision_config_dict )
if text_config is None:
lowerCamelCase : Dict = {}
logger.info('`text_config` is `None`. Initializing the `AltCLIPTextConfig` with default values.' )
if vision_config is None:
lowerCamelCase : Any = {}
logger.info('`vision_config` is `None`. initializing the `AltCLIPVisionConfig` with default values.' )
lowerCamelCase : Optional[Any] = AltCLIPTextConfig(**_UpperCAmelCase )
lowerCamelCase : Dict = AltCLIPVisionConfig(**_UpperCAmelCase )
lowerCamelCase : Dict = projection_dim
lowerCamelCase : int = logit_scale_init_value
lowerCamelCase : int = 1.0
@classmethod
def UpperCAmelCase_ ( cls, A, A, **A ):
"""simple docstring"""
return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **_UpperCAmelCase )
def UpperCAmelCase_ ( self ):
"""simple docstring"""
lowerCamelCase : Dict = copy.deepcopy(self.__dict__ )
lowerCamelCase : int = self.text_config.to_dict()
lowerCamelCase : List[str] = self.vision_config.to_dict()
lowerCamelCase : List[str] = self.__class__.model_type
return output
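# Quick usage sketch (added for illustration; the helper name below is mine, not part
# of the original module): compose the two sub-configs into a full AltCLIPConfig and
# round-trip it through to_dict().
def _altclip_config_demo() -> None:
    text_cfg = AltCLIPTextConfig(hidden_size=512, num_hidden_layers=4)
    vision_cfg = AltCLIPVisionConfig(hidden_size=512, num_hidden_layers=4)
    clip_cfg = AltCLIPConfig.from_text_vision_configs(text_cfg, vision_cfg)
    assert clip_cfg.to_dict()["model_type"] == "altclip"
    assert clip_cfg.text_config.hidden_size == 512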
| 713
|
'''simple docstring'''
from manim import *
class Stage1(Scene):
    # Best-effort reconstruction: the direction constants (UP, RIGHT, DOWN) and the
    # YELLOW fill were stripped by obfuscation and are restored from the layout the
    # code produces, so treat them as assumptions.
    def construct(self):
        mem = Rectangle(height=0.5, width=0.5)
        fill = Rectangle(height=0.46, width=0.46).set_stroke(width=0)

        cpu_left_col_base = [mem.copy() for i in range(6)]
        cpu_right_col_base = [mem.copy() for i in range(6)]
        cpu_left_col = VGroup(*cpu_left_col_base).arrange(UP, buff=0)
        cpu_right_col = VGroup(*cpu_right_col_base).arrange(UP, buff=0)
        cpu_rects = VGroup(cpu_left_col, cpu_right_col).arrange(RIGHT, buff=0)
        cpu_text = Text('CPU', font_size=24)
        cpu = Group(cpu_rects, cpu_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        cpu.move_to([-2.5, -0.5, 0])
        self.add(cpu)

        gpu_base = [mem.copy() for i in range(1)]
        gpu_rect = VGroup(*gpu_base).arrange(UP, buff=0)
        gpu_text = Text('GPU', font_size=24)
        gpu = Group(gpu_rect, gpu_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        gpu.align_to(cpu, DOWN)
        gpu.set_x(gpu.get_x() - 1)
        self.add(gpu)

        model_base = [mem.copy() for i in range(6)]
        model_rect = VGroup(*model_base).arrange(RIGHT, buff=0)
        model_text = Text('Model', font_size=24)
        model = Group(model_rect, model_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        model.move_to([3, -1.0, 0])

        self.play(
            Create(cpu_left_col, run_time=1), Create(cpu_right_col, run_time=1), Create(gpu_rect, run_time=1), )

        step_1 = MarkupText(
            f"First, an empty model skeleton is loaded\ninto <span fgcolor='{YELLOW}'>memory</span> without using much RAM.", font_size=24, )
        key = Square(side_length=2.2)
        key.move_to([-5, 2, 0])

        key_text = MarkupText(
            f"<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model", font_size=18, )
        key_text.move_to([-5, 2.4, 0])

        step_1.move_to([2, 2, 0])
        self.play(Write(step_1, run_time=2.5), Write(key_text), Write(key))

        self.add(model)

        cpu_targs = []
        first_animations = []
        second_animations = []
        for i, rect in enumerate(model_base):
            cpu_target = Rectangle(height=0.46, width=0.46).set_stroke(width=0.0).set_fill(YELLOW, opacity=0.7)
            cpu_target.move_to(rect)
            cpu_target.generate_target()
            cpu_target.target.height = 0.46 / 4
            cpu_target.target.width = 0.46 / 3
            if i == 0:
                cpu_target.target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT), buff=0.02, direction=UP)
                cpu_target.target.set_x(cpu_target.target.get_x() + 0.1)
            elif i == 3:
                cpu_target.target.next_to(cpu_targs[0].target, direction=UP, buff=0.0)
            else:
                cpu_target.target.next_to(cpu_targs[i - 1].target, direction=RIGHT, buff=0.0)
            cpu_targs.append(cpu_target)

            first_animations.append(rect.animate(run_time=0.5).set_stroke(YELLOW))
            second_animations.append(MoveToTarget(cpu_target, run_time=1.5))

        self.play(*first_animations)
        self.play(*second_animations)
        self.wait()
| 449
| 0
|
import argparse
import struct
import unittest
class SHA256:
    def __init__(self, data: bytes) -> None:
        self.data = data
# Initialize hash values
        self.hashes = [
0X6a_09_e6_67,
0Xbb_67_ae_85,
0X3c_6e_f3_72,
0Xa5_4f_f5_3a,
0X51_0e_52_7f,
0X9b_05_68_8c,
0X1f_83_d9_ab,
0X5b_e0_cd_19,
]
# Initialize round constants
        self.round_constants = [
0X42_8a_2f_98,
0X71_37_44_91,
0Xb5_c0_fb_cf,
0Xe9_b5_db_a5,
0X39_56_c2_5b,
0X59_f1_11_f1,
0X92_3f_82_a4,
0Xab_1c_5e_d5,
0Xd8_07_aa_98,
0X12_83_5b_01,
0X24_31_85_be,
0X55_0c_7d_c3,
0X72_be_5d_74,
0X80_de_b1_fe,
0X9b_dc_06_a7,
0Xc1_9b_f1_74,
0Xe4_9b_69_c1,
0Xef_be_47_86,
0X0f_c1_9d_c6,
0X24_0c_a1_cc,
0X2d_e9_2c_6f,
0X4a_74_84_aa,
0X5c_b0_a9_dc,
0X76_f9_88_da,
0X98_3e_51_52,
0Xa8_31_c6_6d,
0Xb0_03_27_c8,
0Xbf_59_7f_c7,
0Xc6_e0_0b_f3,
0Xd5_a7_91_47,
0X06_ca_63_51,
0X14_29_29_67,
0X27_b7_0a_85,
0X2e_1b_21_38,
0X4d_2c_6d_fc,
0X53_38_0d_13,
0X65_0a_73_54,
0X76_6a_0a_bb,
0X81_c2_c9_2e,
0X92_72_2c_85,
0Xa2_bf_e8_a1,
0Xa8_1a_66_4b,
0Xc2_4b_8b_70,
0Xc7_6c_51_a3,
0Xd1_92_e8_19,
0Xd6_99_06_24,
0Xf4_0e_35_85,
0X10_6a_a0_70,
0X19_a4_c1_16,
0X1e_37_6c_08,
0X27_48_77_4c,
0X34_b0_bc_b5,
0X39_1c_0c_b3,
0X4e_d8_aa_4a,
0X5b_9c_ca_4f,
0X68_2e_6f_f3,
0X74_8f_82_ee,
0X78_a5_63_6f,
0X84_c8_78_14,
0X8c_c7_02_08,
0X90_be_ff_fa,
0Xa4_50_6c_eb,
0Xbe_f9_a3_f7,
0Xc6_71_78_f2,
]
        self.preprocessed_data = self.preprocessing(self.data)
        self.final_hash()

    @staticmethod
    def preprocessing(data: bytes) -> bytes:
        # Pad to the next multiple of 64 bytes: 0x80, zero fill, then the
        # original bit-length as a big-endian 64-bit integer.
        padding = b"\x80" + (b"\x00" * (63 - (len(data) + 8) % 64))
        big_endian_integer = struct.pack(">Q", (len(data) * 8))
        return data + padding + big_endian_integer
    def final_hash(self) -> None:
        # Convert into blocks of 64 bytes
        self.blocks = [
            self.preprocessed_data[x : x + 64]
            for x in range(0, len(self.preprocessed_data), 64)
        ]

        for block in self.blocks:
            # Convert the given block into a list of 4 byte integers
            words = list(struct.unpack(">16L", block))
            # add 48 0-ed integers
            words += [0] * 48

            a, b, c, d, e, f, g, h = self.hashes

            for index in range(0, 64):
                if index > 15:
                    # modify the zero-ed indexes at the end of the array
                    s0 = (
                        self.ror(words[index - 15], 7)
                        ^ self.ror(words[index - 15], 18)
                        ^ (words[index - 15] >> 3)
                    )
                    s1 = (
                        self.ror(words[index - 2], 17)
                        ^ self.ror(words[index - 2], 19)
                        ^ (words[index - 2] >> 10)
                    )
                    words[index] = (
                        words[index - 16] + s0 + words[index - 7] + s1
                    ) % 0x1_00_00_00_00

                # Compression
                s1 = self.ror(e, 6) ^ self.ror(e, 11) ^ self.ror(e, 25)
                ch = (e & f) ^ ((~e & 0xFF_FF_FF_FF) & g)
                temp1 = (
                    h + s1 + ch + self.round_constants[index] + words[index]
                ) % 0x1_00_00_00_00
                s0 = self.ror(a, 2) ^ self.ror(a, 13) ^ self.ror(a, 22)
                maj = (a & b) ^ (a & c) ^ (b & c)
                temp2 = (s0 + maj) % 0x1_00_00_00_00

                h, g, f, e, d, c, b, a = (
                    g,
                    f,
                    e,
                    ((d + temp1) % 0x1_00_00_00_00),
                    c,
                    b,
                    a,
                    ((temp1 + temp2) % 0x1_00_00_00_00),
                )

            mutated_hash_values = [a, b, c, d, e, f, g, h]

            # Modify final values
            self.hashes = [
                ((element + mutated_hash_values[index]) % 0x1_00_00_00_00)
                for index, element in enumerate(self.hashes)
            ]

        self.hash = "".join([hex(value)[2:].zfill(8) for value in self.hashes])
    def ror(self, value: int, rotations: int) -> int:
        return 0xFF_FF_FF_FF & (value << (32 - rotations)) | (value >> rotations)


class SHA256HashTest(unittest.TestCase):
    def test_match_hashes(self) -> None:
        import hashlib

        data = bytes("Test String", "utf-8")
        self.assertEqual(SHA256(data).hash, hashlib.sha256(data).hexdigest())
def main() -> None:
    import doctest

    doctest.testmod()

    parser = argparse.ArgumentParser()
    parser.add_argument(
        "-s", "--string", dest="input_string", default="Hello World!! Welcome to Cryptography", help="Hash the string", )
    parser.add_argument(
        "-f", "--file", dest="input_file", help="Hash contents of a file")
    args = parser.parse_args()
    input_string = args.input_string

    # hash input should be a bytestring
    if args.input_file:
        with open(args.input_file, "rb") as f:
            hash_input = f.read()
    else:
        hash_input = bytes(input_string, "utf-8")
    print(SHA256(hash_input).hash)
if __name__ == "__main__":
main()
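# A quick self-check sketch for the class above (my addition, not part of the
# original module): it verifies the padding invariant -- preprocessed input is
# always a whole number of 64-byte blocks -- and cross-checks digests against hashlib.
def _sha256_smoke_test() -> None:
    import hashlib

    for message in (b"", b"abc", b"a" * 1000):
        hasher = SHA256(message)
        assert len(hasher.preprocessed_data) % 64 == 0
        assert hasher.hash == hashlib.sha256(message).hexdigest()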
| 166
|
import argparse
import torch
from transformers import BertConfig, BertForPreTraining, load_tf_weights_in_bert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, bert_config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = BertConfig.from_json_file(bert_config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = BertForPreTraining(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_bert(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--bert_config_file""",
default=None,
type=str,
required=True,
help=(
"""The config json file corresponding to the pre-trained BERT model. \n"""
"""This specifies the model architecture."""
),
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
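# Example invocation sketch (added; the script name and all paths below are
# placeholders, not verified artifacts):
#
#   python convert_bert_original_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path ./bert_model.ckpt \
#       --bert_config_file ./bert_config.json \
#       --pytorch_dump_path ./pytorch_model.bin
#
# Programmatic use is equivalent:
#   convert_tf_checkpoint_to_pytorch("./bert_model.ckpt", "./bert_config.json", "./pytorch_model.bin")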
| 166
| 1
|
from __future__ import annotations
from collections.abc import Iterator
class Node:
    def __init__(self, value: int) -> None:
        self.value = value
        self.left: Node | None = None
        self.right: Node | None = None


class BinaryTreeNodeSum:
    def __init__(self, tree: Node) -> None:
        self.tree = tree

    def depth_first_search(self, node: Node | None) -> int:
        if node is None:
            return 0
        return node.value + (
            self.depth_first_search(node.left) + self.depth_first_search(node.right)
        )

    def __iter__(self) -> Iterator[int]:
        yield self.depth_first_search(self.tree)
if __name__ == "__main__":
import doctest
doctest.testmod()
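# Usage sketch (my addition): build the tree
#         10
#        /  \
#       5    -3
#      /
#     12
# and confirm the DFS sum is 10 + 5 + (-3) + 12 = 24.
def _tree_sum_demo() -> None:
    root = Node(10)
    root.left = Node(5)
    root.right = Node(-3)
    root.left.left = Node(12)
    assert next(iter(BinaryTreeNodeSum(root))) == 24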
| 608
|
def gnome_sort(lst: list) -> list:
    if len(lst) <= 1:
        return lst

    i = 1

    while i < len(lst):
        if lst[i - 1] <= lst[i]:
            i += 1
        else:
            lst[i - 1], lst[i] = lst[i], lst[i - 1]
            i -= 1
            if i == 0:
                i = 1

    return lst


if __name__ == "__main__":
    user_input = input('Enter numbers separated by a comma:\n').strip()
    unsorted = [int(item) for item in user_input.split(',')]
    print(gnome_sort(unsorted))
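# Property-check sketch (my addition): gnome sort is O(n^2), like insertion sort;
# on random inputs it must agree with Python's built-in sorted().
def _gnome_sort_check() -> None:
    import random

    for _ in range(100):
        data = [random.randint(-50, 50) for _ in range(random.randint(0, 20))]
        assert gnome_sort(list(data)) == sorted(data)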
| 608
| 1
|
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_convbert import ConvBertTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {'vocab_file': 'vocab.txt'}

PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {
        'YituTech/conv-bert-base': 'https://huggingface.co/YituTech/conv-bert-base/resolve/main/vocab.txt',
        'YituTech/conv-bert-medium-small': (
            'https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/vocab.txt'
        ),
        'YituTech/conv-bert-small': 'https://huggingface.co/YituTech/conv-bert-small/resolve/main/vocab.txt',
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    'YituTech/conv-bert-base': 512,
    'YituTech/conv-bert-medium-small': 512,
    'YituTech/conv-bert-small': 512,
}

PRETRAINED_INIT_CONFIGURATION = {
    'YituTech/conv-bert-base': {'do_lower_case': True},
    'YituTech/conv-bert-medium-small': {'do_lower_case': True},
    'YituTech/conv-bert-small': {'do_lower_case': True},
}
class ConvBertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = ConvBertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents, **kwargs, )

        # Re-sync the backend normalizer if the requested options differ from the
        # ones serialized with the tokenizer file.
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]

        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]

        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
| 649
|
from __future__ import annotations

from math import ceil, floor, sqrt


def solution(target: int = 2_000_000) -> int:
    triangle_numbers: list[int] = [0]
    idx: int

    for idx in range(1, ceil(sqrt(target * 2) * 1.1)):
        triangle_numbers.append(triangle_numbers[-1] + idx)

    # we want this to be as close as possible to target
    best_product: int = 0
    # the area corresponding to the grid that gives the product closest to target
    area: int = 0
    # an estimate of b, using the quadratic formula
    b_estimate: float
    # the largest integer less than b_estimate
    b_floor: int
    # the smallest integer greater than b_estimate
    b_ceil: int
    # the triangle number corresponding to b_floor
    triangle_b_first_guess: int
    # the triangle number corresponding to b_ceil
    triangle_b_second_guess: int

    for idx_a, triangle_a in enumerate(triangle_numbers[1:], 1):
        b_estimate = (-1 + sqrt(1 + 8 * target / triangle_a)) / 2
        b_floor = floor(b_estimate)
        b_ceil = ceil(b_estimate)
        triangle_b_first_guess = triangle_numbers[b_floor]
        triangle_b_second_guess = triangle_numbers[b_ceil]

        if abs(target - triangle_b_first_guess * triangle_a) < abs(
            target - best_product
        ):
            best_product = triangle_b_first_guess * triangle_a
            area = idx_a * b_floor

        if abs(target - triangle_b_second_guess * triangle_a) < abs(
            target - best_product
        ):
            best_product = triangle_b_second_guess * triangle_a
            area = idx_a * b_ceil

    return area


if __name__ == "__main__":
    print(F'{solution() = }')
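# Worked check of the quadratic estimate used above (my addition): an a x b grid of
# unit squares contains T(a) * T(b) rectangles, where T(n) = n * (n + 1) / 2.
# Fixing T(a) and solving T(b) = target / T(a), i.e. b^2 + b - 2 * target / T(a) = 0,
# gives b = (-1 + sqrt(1 + 8 * target / T(a))) / 2 -- the `b_estimate` formula.
def _rectangle_count_demo() -> None:
    def triangle(n: int) -> int:
        return n * (n + 1) // 2

    # A 3 x 2 grid famously contains T(3) * T(2) = 6 * 3 = 18 rectangles.
    assert triangle(3) * triangle(2) == 18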
| 485
| 0
|
from collections import defaultdict


def dfs(start: int) -> int:
    # Count the size of the subtree rooted at `start`; record every vertex whose
    # subtree has an even number of nodes (a candidate cut point).
    ret = 1
    visited[start] = True
    for v in tree[start]:
        if v not in visited:
            ret += dfs(v)
    if ret % 2 == 0:
        cuts.append(start)
    return ret


def even_tree() -> None:
    dfs(1)


if __name__ == "__main__":
    n_nodes, n_edges = 10, 9
    tree = defaultdict(list)
    visited: dict[int, bool] = {}
    cuts: list[int] = []
    count = 0
    edges = [(2, 1), (3, 1), (4, 3), (5, 2), (6, 1), (7, 2), (8, 6), (9, 8), (10, 8)]
    for u, v in edges:
        tree[u].append(v)
        tree[v].append(u)
    even_tree()
    # The root's even subtree is the whole tree, so the number of removable
    # edges is one less than the number of even subtrees found.
    print(len(cuts) - 1)
| 720
|
from ...configuration_utils import PretrainedConfig
class BertGenerationConfig(PretrainedConfig):
    model_type = "bert-generation"

    def __init__(
        self,
        vocab_size=50358,
        hidden_size=1024,
        num_hidden_layers=24,
        num_attention_heads=16,
        intermediate_size=4096,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        bos_token_id=2,
        eos_token_id=1,
        position_embedding_type="absolute",
        use_cache=True,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
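# Usage sketch (added): instantiating with no arguments reproduces the defaults
# above; keyword arguments override individual hyperparameters.
def _bert_generation_config_demo() -> None:
    config = BertGenerationConfig(hidden_size=512, num_hidden_layers=4)
    assert config.hidden_size == 512 and config.vocab_size == 50358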
| 110
| 0
|
"""simple docstring"""
class lowerCAmelCase_ :
'''simple docstring'''
def __init__( self : Optional[int] ) -> List[str]:
A = 0
A = 0
A = {}
def _SCREAMING_SNAKE_CASE ( self : int ,A_ : Optional[Any] ) -> int:
if vertex not in self.adjacency:
A = {}
self.num_vertices += 1
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ,A_ : Tuple ,A_ : Any ,A_ : str ) -> Optional[Any]:
self.add_vertex(A_ )
self.add_vertex(A_ )
if head == tail:
return
A = weight
A = weight
def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Dict:
A = self.get_edges()
for edge in edges:
A , A , A = edge
edges.remove((tail, head, weight) )
for i in range(len(A_ ) ):
A = list(edges[i] )
edges.sort(key=lambda A_ : e[2] )
for i in range(len(A_ ) - 1 ):
if edges[i][2] >= edges[i + 1][2]:
A = edges[i][2] + 1
for edge in edges:
A , A , A = edge
A = weight
A = weight
def __str__( self : Union[str, Any] ) -> str:
A = ''
for tail in self.adjacency:
for head in self.adjacency[tail]:
A = self.adjacency[head][tail]
string += F'{head} -> {tail} == {weight}\n'
return string.rstrip('\n' )
def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> int:
A = []
for tail in self.adjacency:
for head in self.adjacency[tail]:
output.append((tail, head, self.adjacency[head][tail]) )
return output
def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> int:
return self.adjacency.keys()
@staticmethod
def _SCREAMING_SNAKE_CASE ( A_ : Dict=None ,A_ : int=None ) -> Tuple:
A = Graph()
if vertices is None:
A = []
if edges is None:
A = []
for vertex in vertices:
g.add_vertex(A_ )
for edge in edges:
g.add_edge(*A_ )
return g
class lowerCAmelCase_ :
'''simple docstring'''
def __init__( self : str ) -> Union[str, Any]:
A = {}
A = {}
def __len__( self : int ) -> int:
return len(self.parent )
def _SCREAMING_SNAKE_CASE ( self : List[Any] ,A_ : Any ) -> Any:
if item in self.parent:
return self.find(A_ )
A = item
A = 0
return item
def _SCREAMING_SNAKE_CASE ( self : Any ,A_ : Tuple ) -> Tuple:
if item not in self.parent:
return self.make_set(A_ )
if item != self.parent[item]:
A = self.find(self.parent[item] )
return self.parent[item]
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ,A_ : Dict ,A_ : Optional[Any] ) -> List[str]:
A = self.find(A_ )
A = self.find(A_ )
if roota == roota:
return roota
if self.rank[roota] > self.rank[roota]:
A = roota
return roota
if self.rank[roota] < self.rank[roota]:
A = roota
return roota
if self.rank[roota] == self.rank[roota]:
self.rank[roota] += 1
A = roota
return roota
return None
@staticmethod
def _SCREAMING_SNAKE_CASE ( A_ : Any ) -> Optional[int]:
A = graph.num_vertices
A = Graph.UnionFind()
A = []
while num_components > 1:
A = {}
for vertex in graph.get_vertices():
A = -1
A = graph.get_edges()
for edge in edges:
A , A , A = edge
edges.remove((tail, head, weight) )
for edge in edges:
A , A , A = edge
A = union_find.find(A_ )
A = union_find.find(A_ )
if seta != seta:
if cheap_edge[seta] == -1 or cheap_edge[seta][2] > weight:
A = [head, tail, weight]
if cheap_edge[seta] == -1 or cheap_edge[seta][2] > weight:
A = [head, tail, weight]
for vertex in cheap_edge:
if cheap_edge[vertex] != -1:
A , A , A = cheap_edge[vertex]
if union_find.find(A_ ) != union_find.find(A_ ):
union_find.union(A_ ,A_ )
mst_edges.append(cheap_edge[vertex] )
A = num_components - 1
A = Graph.build(edges=A_ )
return mst
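# Usage sketch (added): Borůvka repeatedly attaches each component's cheapest
# outgoing edge, at least halving the component count per round (O(E log V) overall).
def _boruvka_demo() -> None:
    g = Graph.build(vertices=[1, 2, 3, 4], edges=[(1, 2, 1), (2, 3, 2), (3, 4, 3), (4, 1, 4)])
    mst = Graph.boruvka_mst(g)
    print(mst)  # spanning edges of total weight 1 + 2 + 3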
| 91
|
import unittest
import numpy as np
from transformers.testing_utils import require_flax, require_tf, require_torch
from transformers.utils import (
expand_dims,
flatten_dict,
is_flax_available,
is_tf_available,
is_torch_available,
reshape,
squeeze,
transpose,
)
if is_flax_available():
import jax.numpy as jnp
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
class GenericTester(unittest.TestCase):
    def test_flatten_dict(self):
        input_dict = {
            'task_specific_params': {
                'summarization': {'length_penalty': 1.0, 'max_length': 128, 'min_length': 12, 'num_beams': 4},
                'summarization_cnn': {'length_penalty': 2.0, 'max_length': 142, 'min_length': 56, 'num_beams': 4},
                'summarization_xsum': {'length_penalty': 1.0, 'max_length': 62, 'min_length': 11, 'num_beams': 6},
            }
        }
        expected_dict = {
            'task_specific_params.summarization.length_penalty': 1.0,
            'task_specific_params.summarization.max_length': 128,
            'task_specific_params.summarization.min_length': 12,
            'task_specific_params.summarization.num_beams': 4,
            'task_specific_params.summarization_cnn.length_penalty': 2.0,
            'task_specific_params.summarization_cnn.max_length': 142,
            'task_specific_params.summarization_cnn.min_length': 56,
            'task_specific_params.summarization_cnn.num_beams': 4,
            'task_specific_params.summarization_xsum.length_penalty': 1.0,
            'task_specific_params.summarization_xsum.max_length': 62,
            'task_specific_params.summarization_xsum.min_length': 11,
            'task_specific_params.summarization_xsum.num_beams': 6,
        }

        self.assertEqual(flatten_dict(input_dict), expected_dict)

    def test_transpose_numpy(self):
        x = np.random.randn(3, 4)
        self.assertTrue(np.allclose(transpose(x), x.transpose()))

        x = np.random.randn(3, 4, 5)
        self.assertTrue(np.allclose(transpose(x, axes=(1, 2, 0)), x.transpose((1, 2, 0))))

    @require_torch
    def test_transpose_torch(self):
        x = np.random.randn(3, 4)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(transpose(x), transpose(t).numpy()))

        x = np.random.randn(3, 4, 5)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(transpose(x, axes=(1, 2, 0)), transpose(t, axes=(1, 2, 0)).numpy()))

    @require_tf
    def test_transpose_tf(self):
        x = np.random.randn(3, 4)
        t = tf.constant(x)
        self.assertTrue(np.allclose(transpose(x), transpose(t).numpy()))

        x = np.random.randn(3, 4, 5)
        t = tf.constant(x)
        self.assertTrue(np.allclose(transpose(x, axes=(1, 2, 0)), transpose(t, axes=(1, 2, 0)).numpy()))

    @require_flax
    def test_transpose_flax(self):
        x = np.random.randn(3, 4)
        t = jnp.array(x)
        self.assertTrue(np.allclose(transpose(x), np.asarray(transpose(t))))

        x = np.random.randn(3, 4, 5)
        t = jnp.array(x)
        self.assertTrue(np.allclose(transpose(x, axes=(1, 2, 0)), np.asarray(transpose(t, axes=(1, 2, 0)))))

    def test_reshape_numpy(self):
        x = np.random.randn(3, 4)
        self.assertTrue(np.allclose(reshape(x, (4, 3)), np.reshape(x, (4, 3))))

        x = np.random.randn(3, 4, 5)
        self.assertTrue(np.allclose(reshape(x, (12, 5)), np.reshape(x, (12, 5))))

    @require_torch
    def test_reshape_torch(self):
        x = np.random.randn(3, 4)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(reshape(x, (4, 3)), reshape(t, (4, 3)).numpy()))

        x = np.random.randn(3, 4, 5)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(reshape(x, (12, 5)), reshape(t, (12, 5)).numpy()))

    @require_tf
    def test_reshape_tf(self):
        x = np.random.randn(3, 4)
        t = tf.constant(x)
        self.assertTrue(np.allclose(reshape(x, (4, 3)), reshape(t, (4, 3)).numpy()))

        x = np.random.randn(3, 4, 5)
        t = tf.constant(x)
        self.assertTrue(np.allclose(reshape(x, (12, 5)), reshape(t, (12, 5)).numpy()))

    @require_flax
    def test_reshape_flax(self):
        x = np.random.randn(3, 4)
        t = jnp.array(x)
        self.assertTrue(np.allclose(reshape(x, (4, 3)), np.asarray(reshape(t, (4, 3)))))

        x = np.random.randn(3, 4, 5)
        t = jnp.array(x)
        self.assertTrue(np.allclose(reshape(x, (12, 5)), np.asarray(reshape(t, (12, 5)))))

    def test_squeeze_numpy(self):
        x = np.random.randn(1, 3, 4)
        self.assertTrue(np.allclose(squeeze(x), np.squeeze(x)))

        x = np.random.randn(1, 4, 1, 5)
        self.assertTrue(np.allclose(squeeze(x, axis=2), np.squeeze(x, axis=2)))

    @require_torch
    def test_squeeze_torch(self):
        x = np.random.randn(1, 3, 4)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(squeeze(x), squeeze(t).numpy()))

        x = np.random.randn(1, 4, 1, 5)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(squeeze(x, axis=2), squeeze(t, axis=2).numpy()))

    @require_tf
    def test_squeeze_tf(self):
        x = np.random.randn(1, 3, 4)
        t = tf.constant(x)
        self.assertTrue(np.allclose(squeeze(x), squeeze(t).numpy()))

        x = np.random.randn(1, 4, 1, 5)
        t = tf.constant(x)
        self.assertTrue(np.allclose(squeeze(x, axis=2), squeeze(t, axis=2).numpy()))

    @require_flax
    def test_squeeze_flax(self):
        x = np.random.randn(1, 3, 4)
        t = jnp.array(x)
        self.assertTrue(np.allclose(squeeze(x), np.asarray(squeeze(t))))

        x = np.random.randn(1, 4, 1, 5)
        t = jnp.array(x)
        self.assertTrue(np.allclose(squeeze(x, axis=2), np.asarray(squeeze(t, axis=2))))

    def test_expand_dims_numpy(self):
        x = np.random.randn(3, 4)
        self.assertTrue(np.allclose(expand_dims(x, axis=1), np.expand_dims(x, axis=1)))

    @require_torch
    def test_expand_dims_torch(self):
        x = np.random.randn(3, 4)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(expand_dims(x, axis=1), expand_dims(t, axis=1).numpy()))

    @require_tf
    def test_expand_dims_tf(self):
        x = np.random.randn(3, 4)
        t = tf.constant(x)
        self.assertTrue(np.allclose(expand_dims(x, axis=1), expand_dims(t, axis=1).numpy()))

    @require_flax
    def test_expand_dims_flax(self):
        x = np.random.randn(3, 4)
        t = jnp.array(x)
        self.assertTrue(np.allclose(expand_dims(x, axis=1), np.asarray(expand_dims(t, axis=1))))
| 576
| 0
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
"""configuration_transfo_xl""": ["""TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP""", """TransfoXLConfig"""],
"""tokenization_transfo_xl""": ["""TransfoXLCorpus""", """TransfoXLTokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_transfo_xl"] = [
"""TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""AdaptiveEmbedding""",
"""TransfoXLForSequenceClassification""",
"""TransfoXLLMHeadModel""",
"""TransfoXLModel""",
"""TransfoXLPreTrainedModel""",
"""load_tf_weights_in_transfo_xl""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_transfo_xl"] = [
"""TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFAdaptiveEmbedding""",
"""TFTransfoXLForSequenceClassification""",
"""TFTransfoXLLMHeadModel""",
"""TFTransfoXLMainLayer""",
"""TFTransfoXLModel""",
"""TFTransfoXLPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_transfo_xl import TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP, TransfoXLConfig
from .tokenization_transfo_xl import TransfoXLCorpus, TransfoXLTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_transfo_xl import (
TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
AdaptiveEmbedding,
TransfoXLForSequenceClassification,
TransfoXLLMHeadModel,
TransfoXLModel,
TransfoXLPreTrainedModel,
load_tf_weights_in_transfo_xl,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_transfo_xl import (
TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFAdaptiveEmbedding,
TFTransfoXLForSequenceClassification,
TFTransfoXLLMHeadModel,
TFTransfoXLMainLayer,
TFTransfoXLModel,
TFTransfoXLPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
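# The module above defers heavy imports until an attribute is first touched. A
# minimal sketch of the same idea with plain PEP 562 module-level __getattr__
# (my addition; transformers' _LazyModule does more bookkeeping than this):
#
#   import importlib
#
#   _LAZY = {"TransfoXLModel": ".modeling_transfo_xl"}
#
#   def __getattr__(name):
#       if name in _LAZY:
#           module = importlib.import_module(_LAZY[name], __package__)
#           return getattr(module, name)
#       raise AttributeError(name)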
| 563
|
from __future__ import annotations
import pandas as pd
def calculate_waitingtime(arrival_time, burst_time, no_of_processes):
    """Calculate the waiting time of each process (shortest remaining time first)."""
    remaining_time = [0] * no_of_processes
    waiting_time = [0] * no_of_processes
    # Copy the burst time into remaining_time[]
    for i in range(no_of_processes):
        remaining_time[i] = burst_time[i]

    complete = 0
    increment_time = 0
    minm = 999999999
    short = 0
    check = False

    # Process until all processes are completed
    while complete != no_of_processes:
        for j in range(no_of_processes):
            if arrival_time[j] <= increment_time and remaining_time[j] > 0:
                if remaining_time[j] < minm:
                    minm = remaining_time[j]
                    short = j
                    check = True

        if not check:
            increment_time += 1
            continue
        remaining_time[short] -= 1

        minm = remaining_time[short]
        if minm == 0:
            minm = 999999999

        if remaining_time[short] == 0:
            complete += 1
            check = False

            # Find finish time of current process
            finish_time = increment_time + 1

            # Calculate waiting time
            finar = finish_time - arrival_time[short]
            waiting_time[short] = finar - burst_time[short]

            if waiting_time[short] < 0:
                waiting_time[short] = 0

        # Increment time
        increment_time += 1
    return waiting_time


def calculate_turnaroundtime(burst_time, no_of_processes, waiting_time):
    turn_around_time = [0] * no_of_processes
    for i in range(no_of_processes):
        turn_around_time[i] = burst_time[i] + waiting_time[i]
    return turn_around_time


def calculate_average_times(waiting_time, turn_around_time, no_of_processes):
    total_waiting_time = 0
    total_turn_around_time = 0
    for i in range(no_of_processes):
        total_waiting_time = total_waiting_time + waiting_time[i]
        total_turn_around_time = total_turn_around_time + turn_around_time[i]
    print(f"Average waiting time = {total_waiting_time / no_of_processes:.5f}")
    print('Average turn around time =', total_turn_around_time / no_of_processes)


if __name__ == "__main__":
    print("Enter how many process you want to analyze")
    no_of_processes = int(input())
    burst_time = [0] * no_of_processes
    arrival_time = [0] * no_of_processes
    processes = list(range(1, no_of_processes + 1))

    for i in range(no_of_processes):
        print("Enter the arrival time and burst time for process:--" + str(i + 1))
        arrival_time[i], burst_time[i] = map(int, input().split())

    waiting_time = calculate_waitingtime(arrival_time, burst_time, no_of_processes)

    bt = burst_time
    n = no_of_processes
    wt = waiting_time
    turn_around_time = calculate_turnaroundtime(bt, n, wt)

    calculate_average_times(waiting_time, turn_around_time, no_of_processes)

    fcfs = pd.DataFrame(
        list(zip(processes, burst_time, arrival_time, waiting_time, turn_around_time)),
        columns=[
            "Process",
            "BurstTime",
            "ArrivalTime",
            "WaitingTime",
            "TurnAroundTime",
        ],
    )

    # Printing the dataFrame
    pd.set_option("display.max_rows", fcfs.shape[0] + 1)
    print(fcfs)
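# Non-interactive sketch (added): with arrivals [0, 1, 2] and bursts [3, 1, 2],
# the burst-1 job arriving at t=1 preempts the running burst-3 job, so the
# waiting times come out [1, 0, 2] and the turnaround times [4, 1, 4].
def _srtf_demo() -> None:
    arrivals, bursts = [0, 1, 2], [3, 1, 2]
    wait = calculate_waitingtime(arrivals, bursts, 3)
    turnaround = calculate_turnaroundtime(bursts, 3, wait)
    print(wait, turnaround)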
| 563
| 1
|
import torch
from diffusers import DDIMParallelScheduler
from .test_schedulers import SchedulerCommonTest
class DDIMParallelSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DDIMParallelScheduler,)
    forward_default_kwargs = (("eta", 0.0), ("num_inference_steps", 50))

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "clip_sample": True,
        }

        config.update(**kwargs)
        return config

    def full_loop(self, **config):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**config)
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps, eta = 10, 0.0

        model = self.dummy_model()
        sample = self.dummy_sample_deter

        scheduler.set_timesteps(num_inference_steps)

        for t in scheduler.timesteps:
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample, eta).prev_sample

        return sample

    def test_timesteps(self):
        for timesteps in [100, 500, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_steps_offset(self):
        for steps_offset in [0, 1]:
            self.check_over_configs(steps_offset=steps_offset)

        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(steps_offset=1)
        scheduler = scheduler_class(**scheduler_config)
        scheduler.set_timesteps(5)
        assert torch.equal(scheduler.timesteps, torch.LongTensor([801, 601, 401, 201, 1]))

    def test_betas(self):
        for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1], [0.002, 0.02, 0.2, 2]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_clip_sample(self):
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample)

    def test_timestep_spacing(self):
        for timestep_spacing in ["trailing", "leading"]:
            self.check_over_configs(timestep_spacing=timestep_spacing)

    def test_rescale_betas_zero_snr(self):
        for rescale_betas_zero_snr in [True, False]:
            self.check_over_configs(rescale_betas_zero_snr=rescale_betas_zero_snr)

    def test_thresholding(self):
        self.check_over_configs(thresholding=False)
        for threshold in [0.5, 1.0, 2.0]:
            for prediction_type in ["epsilon", "v_prediction"]:
                self.check_over_configs(
                    thresholding=True, prediction_type=prediction_type, sample_max_value=threshold, )

    def test_time_indices(self):
        for t in [1, 10, 49]:
            self.check_over_forward(time_step=t)

    def test_inference_steps(self):
        for t, num_inference_steps in zip([1, 10, 50], [10, 50, 500]):
            self.check_over_forward(time_step=t, num_inference_steps=num_inference_steps)

    def test_eta(self):
        for t, eta in zip([1, 10, 49], [0.0, 0.5, 1.0]):
            self.check_over_forward(time_step=t, eta=eta)

    def test_variance(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        assert torch.sum(torch.abs(scheduler._get_variance(0, 0) - 0.0)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(420, 400) - 0.14771)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(980, 960) - 0.32460)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(0, 0) - 0.0)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(487, 486) - 0.00979)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(999, 998) - 0.02)) < 1e-5

    def test_batch_step_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps, eta = 10, 0.0
        scheduler.set_timesteps(num_inference_steps)

        model = self.dummy_model()
        sample1 = self.dummy_sample_deter
        sample2 = self.dummy_sample_deter + 0.1
        sample3 = self.dummy_sample_deter - 0.1

        per_sample_batch = sample1.shape[0]
        samples = torch.stack([sample1, sample2, sample3], dim=0)
        timesteps = torch.arange(num_inference_steps)[0:3, None].repeat(1, per_sample_batch)

        residual = model(samples.flatten(0, 1), timesteps.flatten(0, 1))
        pred_prev_sample = scheduler.batch_step_no_noise(residual, timesteps.flatten(0, 1), samples.flatten(0, 1), eta)

        result_sum = torch.sum(torch.abs(pred_prev_sample))
        result_mean = torch.mean(torch.abs(pred_prev_sample))

        assert abs(result_sum.item() - 1147.7904) < 1e-2
        assert abs(result_mean.item() - 0.4982) < 1e-3

    def test_full_loop_no_noise(self):
        sample = self.full_loop()

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 172.0067) < 1e-2
        assert abs(result_mean.item() - 0.223967) < 1e-3

    def test_full_loop_with_v_prediction(self):
        sample = self.full_loop(prediction_type="v_prediction")

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 52.5302) < 1e-2
        assert abs(result_mean.item() - 0.0684) < 1e-3

    def test_full_loop_with_set_alpha_to_one(self):
        # We specify different beta, so that the first alpha is 0.99
        sample = self.full_loop(set_alpha_to_one=True, beta_start=0.01)
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 149.8295) < 1e-2
        assert abs(result_mean.item() - 0.1951) < 1e-3

    def test_full_loop_with_no_set_alpha_to_one(self):
        # We specify different beta, so that the first alpha is 0.99
        sample = self.full_loop(set_alpha_to_one=False, beta_start=0.01)
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 149.0784) < 1e-2
        assert abs(result_mean.item() - 0.1941) < 1e-3
| 81
|
"""simple docstring"""
from collections import defaultdict
from pathlib import Path
import pandas as pd
from rouge_cli import calculate_rouge_path
from utils import calculate_rouge
PRED = [
'''Prosecutor: "No videos were used in the crash investigation" German papers say they saw a cell phone video of the'''
''' final seconds on board Flight 9525. The Germanwings co-pilot says he had a "previous episode of severe'''
''' depression\" German airline confirms it knew of Andreas Lubitz\'s depression years before he took control.''',
'''The Palestinian Authority officially becomes the 123rd member of the International Criminal Court. The formal'''
''' accession was marked with a ceremony at The Hague, in the Netherlands. The Palestinians signed the ICC\'s'''
''' founding Rome Statute in January. Israel and the United States opposed the Palestinians\' efforts to join the'''
''' body.''',
'''Amnesty International releases its annual report on the death penalty. The report catalogs the use of'''
''' state-sanctioned killing as a punitive measure across the globe. At least 607 people were executed around the'''
''' world in 2014, compared to 778 in 2013. The U.S. remains one of the worst offenders for imposing capital'''
''' punishment.''',
]
TGT = [
'''Marseille prosecutor says "so far no videos were used in the crash investigation" despite media reports .'''
''' Journalists at Bild and Paris Match are "very confident" the video clip is real, an editor says . Andreas Lubitz'''
''' had informed his Lufthansa training school of an episode of severe depression, airline says .''',
'''Membership gives the ICC jurisdiction over alleged crimes committed in Palestinian territories since last June .'''
''' Israel and the United States opposed the move, which could open the door to war crimes investigations against'''
''' Israelis .''',
'''Amnesty\'s annual death penalty report catalogs encouraging signs, but setbacks in numbers of those sentenced to'''
''' death . Organization claims that governments around the world are using the threat of terrorism to advance'''
''' executions . The number of executions worldwide has gone down by almost 22% compared with 2013, but death'''
''' sentences up by 28% .''',
]
def test_disaggregated_scores_are_deterministic() -> None:
    no_aggregation = calculate_rouge(PRED, TGT, bootstrap_aggregation=False, rouge_keys=["rouge2", "rougeL"])
    assert isinstance(no_aggregation, defaultdict)
    no_aggregation_just_r2 = calculate_rouge(PRED, TGT, bootstrap_aggregation=False, rouge_keys=["rouge2"])
    assert (
        pd.DataFrame(no_aggregation["rouge2"]).fmeasure.mean()
        == pd.DataFrame(no_aggregation_just_r2["rouge2"]).fmeasure.mean()
    )


def test_newline_cnn_improvement() -> None:
    k = "rougeLsum"
    score = calculate_rouge(PRED, TGT, newline_sep=True, rouge_keys=[k])[k]
    score_no_sep = calculate_rouge(PRED, TGT, newline_sep=False, rouge_keys=[k])[k]
    assert score > score_no_sep


def test_newline_irrelevant_for_other_metrics() -> None:
    k = ["rouge1", "rouge2", "rougeL"]
    score_sep = calculate_rouge(PRED, TGT, newline_sep=True, rouge_keys=k)
    score_no_sep = calculate_rouge(PRED, TGT, newline_sep=False, rouge_keys=k)
    assert score_sep == score_no_sep


def test_single_sent_scores_dont_depend_on_newline_sep() -> None:
    pred = [
        "Her older sister, Margot Frank, died in 1945, a month earlier than previously thought.",
        "Marseille prosecutor says \"so far no videos were used in the crash investigation\" despite media reports .",
    ]
    tgt = [
        "Margot Frank, died in 1945, a month earlier than previously thought.",
        "Prosecutor: \"No videos were used in the crash investigation\" German papers say they saw a cell phone video of"
        " the final seconds on board Flight 9525.",
    ]
    assert calculate_rouge(pred, tgt, newline_sep=True) == calculate_rouge(pred, tgt, newline_sep=False)


def test_pegasus_newline() -> None:
    pred = [
        "\" \"a person who has such a video needs to immediately give it to the investigators,\" prosecutor says .<n> \"it is a very disturbing scene,\" editor-in-chief of bild online tells \"erin burnett: outfront\" "
    ]
    tgt = [
        " Marseille prosecutor says \"so far no videos were used in the crash investigation\" despite media reports . Journalists at Bild and Paris Match are \"very confident\" the video clip is real, an editor says . Andreas Lubitz had informed his Lufthansa training school of an episode of severe depression, airline says ."
    ]

    prev_score = calculate_rouge(pred, tgt, rouge_keys=["rougeLsum"], newline_sep=False)["rougeLsum"]
    new_score = calculate_rouge(pred, tgt, rouge_keys=["rougeLsum"])["rougeLsum"]
    assert new_score > prev_score


def test_rouge_cli() -> None:
    data_dir = Path("examples/seq2seq/test_data/wmt_en_ro")
    metrics = calculate_rouge_path(data_dir.joinpath("test.source"), data_dir.joinpath("test.target"))
    assert isinstance(metrics, dict)
    metrics_default_dict = calculate_rouge_path(
        data_dir.joinpath("test.source"), data_dir.joinpath("test.target"), bootstrap_aggregation=False
    )
    assert isinstance(metrics_default_dict, defaultdict)
| 646
| 0
|
'''simple docstring'''
from random import shuffle
import tensorflow as tf
from numpy import array
def TFKMeansCluster(vectors, noofclusters):
    """
    K-Means clustering using TensorFlow (TF1-era graph API).
    'vectors' should be an n*k 2-D NumPy array, where n is the number
    of vectors of dimensionality k; 'noofclusters' should be an integer.
    """

    noofclusters = int(noofclusters)
    assert noofclusters < len(vectors)

    # Find out the dimensionality
    dim = len(vectors[0])

    # Will help select random centroids from among the available vectors
    vector_indices = list(range(len(vectors)))
    shuffle(vector_indices)

    # GRAPH OF COMPUTATION
    # We initialize a new graph and set it as the default during each run
    # of this algorithm. This ensures that as this function is called
    # multiple times, the default graph doesn't keep getting crowded with
    # unused ops and Variables from previous function calls.
    graph = tf.Graph()

    with graph.as_default():
        # SESSION OF COMPUTATION
        sess = tf.Session()

        ##CONSTRUCTING THE ELEMENTS OF COMPUTATION
        ##First lets ensure we have a Variable vector for each centroid,
        ##initialized to one of the vectors from the available data points
        centroids = [
            tf.Variable(vectors[vector_indices[i]]) for i in range(noofclusters)
        ]
        ##These nodes will assign the centroid Variables the appropriate
        ##values
        centroid_value = tf.placeholder("float64", [dim])
        cent_assigns = []
        for centroid in centroids:
            cent_assigns.append(tf.assign(centroid, centroid_value))

        ##Variables for cluster assignments of individual vectors(initialized
        ##to 0 at first)
        assignments = [tf.Variable(0) for i in range(len(vectors))]
        ##These nodes will assign an assignment Variable the appropriate
        ##value
        assignment_value = tf.placeholder("int32")
        cluster_assigns = []
        for assignment in assignments:
            cluster_assigns.append(tf.assign(assignment, assignment_value))

        ##Now lets construct the node that will compute the mean
        # The placeholder for the input
        mean_input = tf.placeholder("float", [None, dim])
        # The Node/op takes the input and computes a mean along the 0th
        # dimension, i.e. the list of input vectors
        mean_op = tf.reduce_mean(mean_input, 0)

        ##Node for computing Euclidean distances
        # Placeholders for input
        v1 = tf.placeholder("float", [dim])
        v2 = tf.placeholder("float", [dim])
        euclid_dist = tf.sqrt(tf.reduce_sum(tf.pow(tf.sub(v1, v2), 2)))

        ##This node will figure out which cluster to assign a vector to,
        ##based on Euclidean distances of the vector from the centroids.
        # Placeholder for input
        centroid_distances = tf.placeholder("float", [noofclusters])
        cluster_assignment = tf.argmin(centroid_distances, 0)

        ##INITIALIZING STATE VARIABLES
        ##This will help initialization of all Variables defined with respect
        ##to the graph. The Variable-initializer should be defined after
        ##all the Variables have been constructed, so that each of them
        ##will be included in the initialization.
        init_op = tf.initialize_all_variables()

        # Initialize all variables
        sess.run(init_op)

        ##CLUSTERING ITERATIONS
        # Now perform the Expectation-Maximization steps of K-Means clustering
        # iterations. To keep things simple, we will only do a set number of
        # iterations, instead of using a Stopping Criterion.
        noofiterations = 100
        for _ in range(noofiterations):
            ##EXPECTATION STEP
            ##Based on the centroid locations till last iteration, compute
            ##the _expected_ centroid assignments.
            # Iterate over each vector
            for vector_n in range(len(vectors)):
                vect = vectors[vector_n]
                # Compute Euclidean distance between this vector and each
                # centroid. Remember that this list cannot be named
                #'centroid_distances', since that is the input to the
                # cluster assignment node.
                distances = [
                    sess.run(euclid_dist, feed_dict={v1: vect, v2: sess.run(centroid)})
                    for centroid in centroids
                ]
                # Now use the cluster assignment node, with the distances
                # as the input
                assignment = sess.run(
                    cluster_assignment, feed_dict={centroid_distances: distances})
                # Now assign the value to the appropriate state variable
                sess.run(
                    cluster_assigns[vector_n], feed_dict={assignment_value: assignment})

            ##MAXIMIZATION STEP
            # Based on the expected state computed from the Expectation Step,
            # compute the locations of the centroids so as to maximize the
            # overall objective of minimizing within-cluster Sum-of-Squares
            for cluster_n in range(noofclusters):
                # Collect all the vectors assigned to this cluster
                assigned_vects = [
                    vectors[i]
                    for i in range(len(vectors))
                    if sess.run(assignments[i]) == cluster_n
                ]
                # Compute new centroid location
                new_location = sess.run(
                    mean_op, feed_dict={mean_input: array(assigned_vects)})
                # Assign value to appropriate variable
                sess.run(
                    cent_assigns[cluster_n], feed_dict={centroid_value: new_location})

        # Return centroids and assignments
        centroids = sess.run(centroids)
        assignments = sess.run(assignments)
        return centroids, assignments
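# Note: the function above targets the TF1-era API (tf.Session, tf.placeholder,
# tf.sub, tf.initialize_all_variables) and will not run on TensorFlow 2.x.
# A dependency-light Lloyd's-iteration sketch of the same algorithm (my addition):
def numpy_k_means(vectors, k, iterations=100, seed=0):
    import numpy as np

    rng = np.random.default_rng(seed)
    points = np.asarray(vectors, dtype=float)
    centroids = points[rng.choice(len(points), size=k, replace=False)]
    labels = np.zeros(len(points), dtype=int)
    for _ in range(iterations):
        # Expectation: assign each point to its nearest centroid.
        labels = np.argmin(np.linalg.norm(points[:, None] - centroids[None], axis=2), axis=1)
        # Maximization: move each centroid to the mean of its assigned points.
        for j in range(k):
            if np.any(labels == j):
                centroids[j] = points[labels == j].mean(axis=0)
    return centroids, labels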
| 465
|
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class DPMSolverSDEScheduler(metaclass=DummyObject):
    _backends = ["torch", "torchsde"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "torchsde"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "torchsde"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "torchsde"])
| 465
| 1
|
from __future__ import annotations
def peak(lst: list[int]) -> int:
    """Return the peak of a list that strictly increases and then decreases,
    by divide and conquer on the middle three elements."""
    m = len(lst) // 2

    # choose the middle 3 elements
    three = lst[m - 1 : m + 2]

    # if middle element is peak
    if three[1] > three[0] and three[1] > three[2]:
        return three[1]

    # if increasing, recurse on right
    elif three[0] < three[2]:
        if len(lst[:m]) == 2:
            m -= 1
        return peak(lst[m:])

    # decreasing
    else:
        if len(lst[:m]) == 2:
            m += 1
        return peak(lst[:m])
if __name__ == "__main__":
import doctest
doctest.testmod()
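# Usage sketch (my addition): works on "bitonic" lists that strictly rise then fall.
def _peak_demo() -> None:
    assert peak([1, 2, 3, 4, 5, 4, 3, 2, 1]) == 5
    assert peak([1, 10, 9, 8, 7, 6, 5, 4]) == 10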
| 54
|
from __future__ import annotations
import inspect
import unittest
from transformers import ViTConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFViTForImageClassification, TFViTModel
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class TFViTModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        num_labels=3,
        scope=None,
    ):
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1
    def prepare_config_and_inputs(self):
        '''simple docstring'''
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        config = self.get_config()
        return config, pixel_values, labels
    def get_config(self):
        '''simple docstring'''
        return ViTConfig(
            image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=False, initializer_range=self.initializer_range, )
    def create_and_check_model(self, config, pixel_values, labels):
        '''simple docstring'''
        model = TFViTModel(config=config)
        result = model(pixel_values, training=False)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        # Test with an image of a different size than the one specified in the config.
        image_size = self.image_size // 2
        pixel_values = pixel_values[:, :, :image_size, :image_size]
        result = model(pixel_values, interpolate_pos_encoding=True, training=False)
        seq_length = (image_size // self.patch_size) ** 2 + 1
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, seq_length, self.hidden_size))
    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        '''simple docstring'''
        config.num_labels = self.type_sequence_label_size
        model = TFViTForImageClassification(config)
        result = model(pixel_values, labels=labels, training=False)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))
        # Test with an image of a different size than the one specified in the config.
        image_size = self.image_size // 2
        pixel_values = pixel_values[:, :, :image_size, :image_size]
        result = model(pixel_values, interpolate_pos_encoding=True, training=False)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))
        # test greyscale images
        config.num_channels = 1
        model = TFViTForImageClassification(config)
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))
    def prepare_config_and_inputs_for_common(self):
        '''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_tf
class A ( __lowercase , __lowercase , unittest.TestCase ):
_snake_case =(TFViTModel, TFViTForImageClassification) if is_tf_available() else ()
_snake_case =(
{'''feature-extraction''': TFViTModel, '''image-classification''': TFViTForImageClassification}
if is_tf_available()
else {}
)
_snake_case =False
_snake_case =False
_snake_case =False
    def setUp(self) -> None:
        '''simple docstring'''
        self.model_tester = TFViTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTConfig, has_text_modality=False, hidden_size=37)
    def test_config(self) -> None:
        '''simple docstring'''
        self.config_tester.run_common_tests()
@unittest.skip(reason="ViT does not use inputs_embeds" )
def lowerCAmelCase__ ( self: Dict ) -> Tuple:
'''simple docstring'''
pass
@unittest.skip(reason="ViT does not use inputs_embeds" )
def lowerCAmelCase__ ( self: int ) -> Optional[Any]:
'''simple docstring'''
pass
    def test_model_common_attributes(self) -> None:
        '''simple docstring'''
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (tf.keras.layers.Layer))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, tf.keras.layers.Layer))
    def test_forward_signature(self) -> None:
        '''simple docstring'''
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.call)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_model(self) -> None:
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_for_image_classification(self) -> None:
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)
@slow
    def test_model_from_pretrained(self) -> None:
        '''simple docstring'''
        model = TFViTModel.from_pretrained("google/vit-base-patch16-224")
        self.assertIsNotNone(model)
def prepare_img():
    '''simple docstring'''
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_tf
@require_vision
class A ( unittest.TestCase ):
@cached_property
    def default_image_processor(self):
'''simple docstring'''
return ViTImageProcessor.from_pretrained("google/vit-base-patch16-224" ) if is_vision_available() else None
@slow
    def test_inference_image_classification_head(self) -> None:
        '''simple docstring'''
        model = TFViTForImageClassification.from_pretrained("google/vit-base-patch16-224")
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="tf")
        # forward pass
        outputs = model(**inputs)
        # verify the logits
        expected_shape = tf.TensorShape((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = tf.constant([-0.2744, 0.8215, -0.0836])
        tf.debugging.assert_near(outputs.logits[0, :3], expected_slice, atol=1e-4)
| 54
| 1
|
def nor_gate(input_1: int, input_2: int) -> int:
    '''simple docstring'''
    return int(input_1 == input_2 == 0)
def main() -> None:
    '''simple docstring'''
print('''Truth Table of NOR Gate:''' )
print('''| Input 1 | Input 2 | Output |''' )
print(f"| 0 | 0 | {nor_gate(0 , 0 )} |" )
print(f"| 0 | 1 | {nor_gate(0 , 1 )} |" )
print(f"| 1 | 0 | {nor_gate(1 , 0 )} |" )
print(f"| 1 | 1 | {nor_gate(1 , 1 )} |" )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
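# Sanity-check sketch: by De Morgan, NOR(a, b) == NOT (a OR b). Hypothetical
# illustration, not part of the original script:
if __name__ == "__main__":
    for a in (0, 1):
        for b in (0, 1):
            assert nor_gate(a, b) == int(not (a or b))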
| 451
|
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing the experiment tracking capability,
# and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    '''simple docstring'''
    tokenizer = AutoTokenizer.from_pretrained('''bert-base-cased''')
    datasets = load_dataset('''glue''', '''mrpc''')
    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples['''sentence1'''], examples['''sentence2'''], truncation=True, max_length=None)
        return outputs
    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function, batched=True, remove_columns=['''idx''', '''sentence1''', '''sentence2'''], )
    # We also rename the 'label' column to 'labels', which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column('''label''', '''labels''')
    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None
        return tokenizer.pad(
            examples, padding='''longest''', max_length=max_length, pad_to_multiple_of=pad_to_multiple_of, return_tensors='''pt''', )
    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets['''train'''], shuffle=True, collate_fn=collate_fn, batch_size=batch_size)
    eval_dataloader = DataLoader(
        tokenized_datasets['''validation'''], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE)
    return train_dataloader, eval_dataloader
# For testing only
if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
lowercase__ : List[str] = mocked_dataloaders # noqa: F811
def training_function(config, args):
    '''simple docstring'''
    if os.environ.get('''TESTING_MOCKED_DATALOADERS''', None) == "1":
        config['''num_epochs'''] = 2
    # Initialize Accelerator
    # New Code #
    # We pass in "all" to `log_with` to grab all available trackers in the environment
    # Note: If using a custom `Tracker` class, should be passed in here such as:
    # >>> log_with = ["all", MyCustomTrackerClassInstance()]
    if args.with_tracking:
        accelerator = Accelerator(
            cpu=args.cpu, mixed_precision=args.mixed_precision, log_with='''all''', project_dir=args.project_dir)
    else:
        accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config['''lr''']
    num_epochs = int(config['''num_epochs'''])
    seed = int(config['''seed'''])
    batch_size = int(config['''batch_size'''])
    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)
    metric = evaluate.load('''glue''', '''mrpc''')
    # If the batch size is too big we use gradient accumulation
    gradient_accumulation_steps = 1
    if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
        gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
        batch_size = MAX_GPU_BATCH_SIZE
    # Instantiate the model (we build the model here so that the seed also controls new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained('''bert-base-cased''', return_dict=True)
    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)
    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr)
    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer, num_warmup_steps=100, num_training_steps=(len(train_dataloader) * num_epochs) // gradient_accumulation_steps, )
    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler)
    # New Code #
    # We need to initialize the trackers we use. Overall configurations can also be stored
    if args.with_tracking:
        run = os.path.split(__file__)[-1].split('''.''')[0]
        accelerator.init_trackers(run, config)
    # Now we train the model
    for epoch in range(num_epochs):
        model.train()
        # New Code #
        # For our tracking example, we will log the total loss of each epoch
        if args.with_tracking:
            total_loss = 0
        for step, batch in enumerate(train_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            outputs = model(**batch)
            loss = outputs.loss
            # New Code #
            if args.with_tracking:
                total_loss += loss.detach().float()
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss)
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()
        model.eval()
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True` (the default).
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather_for_metrics((predictions, batch['''labels''']))
            metric.add_batch(
                predictions=predictions, references=references, )
        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)
        # New Code #
        # To actually log, we call `Accelerator.log`
        # The values passed can be of `str`, `int`, `float` or `dict` of `str` to `float`/`int`
        if args.with_tracking:
            accelerator.log(
                {
                    '''accuracy''': eval_metric['''accuracy'''],
                    '''f1''': eval_metric['''f1'''],
                    '''train_loss''': total_loss.item() / len(train_dataloader),
                    '''epoch''': epoch,
                }, step=epoch, )
    # New Code #
    # When a run is finished, you should call `accelerator.end_training()`
    # to close all of the open trackers
    if args.with_tracking:
        accelerator.end_training()
def main():
    '''simple docstring'''
    parser = argparse.ArgumentParser(description='''Simple example of training script.''')
    parser.add_argument(
        '''--mixed_precision''', type=str, default=None, choices=['''no''', '''fp16''', '''bf16''', '''fp8'''], help='''Whether to use mixed precision. Choose'''
        '''between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.'''
        '''and an Nvidia Ampere GPU.''', )
    parser.add_argument('''--cpu''', action='''store_true''', help='''If passed, will train on the CPU.''')
    parser.add_argument(
        '''--with_tracking''', action='''store_true''', help='''Whether to load in all available experiment trackers from the environment and use them for logging.''', )
    parser.add_argument(
        '''--project_dir''', type=str, default='''logs''', help='''Location on where to store experiment tracking logs and relevant project information''', )
    args = parser.parse_args()
    config = {'''lr''': 2e-5, '''num_epochs''': 3, '''seed''': 42, '''batch_size''': 16}
    training_function(config, args)
if __name__ == "__main__":
main()
| 451
| 1
|
def abbreviation(a: str, b: str) -> bool:
    n = len(a)
    m = len(b)
    dp = [[False for _ in range(m + 1)] for _ in range(n + 1)]
    dp[0][0] = True
    for i in range(n):
        for j in range(m + 1):
            if dp[i][j]:
                if j < m and a[i].upper() == b[j]:
                    dp[i + 1][j + 1] = True
                if a[i].islower():
                    dp[i + 1][j] = True
    return dp[n][m]
if __name__ == "__main__":
import doctest
doctest.testmod()
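# Usage sketch (assumed semantics: can `a` be turned into `b` by upper-casing
# some of its lowercase letters and deleting the remaining lowercase ones?):
if __name__ == "__main__":
    assert abbreviation("daBcd", "ABC") is True
    assert abbreviation("dBcd", "ABC") is False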
| 40
|
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_yolos import YolosImageProcessor
__A = logging.get_logger(__name__)
class YolosFeatureExtractor(YolosImageProcessor):
    """simple docstring"""
    def __init__(self, *args, **kwargs):
        warnings.warn(
            '''The class YolosFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'''
            ''' use YolosImageProcessor instead.''', FutureWarning, )
        super().__init__(*args, **kwargs)
| 586
| 0
|
from __future__ import annotations
lowerCamelCase__ = '''Muhammad Umer Farooq'''
lowerCamelCase__ = '''MIT'''
lowerCamelCase__ = '''1.0.0'''
lowerCamelCase__ = '''Muhammad Umer Farooq'''
lowerCamelCase__ = '''contact@muhammadumerfarooq.me'''
lowerCamelCase__ = '''Alpha'''
import re
from html.parser import HTMLParser
from urllib import parse
import requests
class Parser(HTMLParser):
    '''simple docstring'''
    def __init__(self, domain: str) -> None:
        """simple docstring"""
        super().__init__()
        self.urls: list[str] = []
        self.domain = domain
    def handle_starttag(self, tag: str, attrs: list[tuple[str, str | None]]) -> None:
        """simple docstring"""
        if tag == "a":
            # Check the list of defined attributes.
            for name, value in attrs:
                # If href is defined and is neither empty nor just '#'.
                if name == "href" and value != "#" and value != "":
                    # If not already in urls.
                    if value not in self.urls:
                        url = parse.urljoin(self.domain, value)
                        self.urls.append(url)
def get_domain_name(url: str) -> str:
    '''simple docstring'''
    return ".".join(get_sub_domain_name(url).split(".")[-2:])
def get_sub_domain_name(url: str) -> str:
    '''simple docstring'''
    return parse.urlparse(url).netloc
def emails_from_url(url: str = "https://github.com") -> list[str]:
    '''simple docstring'''
    domain = get_domain_name(url)
    # Initialize the parser
    parser = Parser(domain)
    try:
        # Open URL
        r = requests.get(url)
        # pass the raw HTML to the parser to get links
        parser.feed(r.text)
        # Get links and loop through
        valid_emails = set()
        for link in parser.urls:
            # Open each URL.
            try:
                read = requests.get(link)
                # Get the valid email.
                emails = re.findall("[a-zA-Z0-9]+@" + domain, read.text)
                # If not in list then append it.
                for email in emails:
                    valid_emails.add(email)
            except ValueError:
                pass
    except ValueError:
        raise SystemExit(1)
    # Finally return a sorted list of email addresses with no duplicates.
    return sorted(valid_emails)
if __name__ == "__main__":
emails = emails_from_url('''https://github.com''')
print(F"{len(emails)} emails found:")
print('''\n'''.join(sorted(emails)))
| 82
|
import argparse
import os
import torch
from transformers import (
XLNetConfig,
XLNetForQuestionAnswering,
XLNetForSequenceClassification,
XLNetLMHeadModel,
load_tf_weights_in_xlnet,
)
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
lowerCamelCase__ = {
'''cola''': 2,
'''mnli''': 3,
'''mrpc''': 2,
'''sst-2''': 2,
'''sts-b''': 1,
'''qqp''': 2,
'''qnli''': 2,
'''rte''': 2,
'''wnli''': 2,
}
logging.set_verbosity_info()
def convert_xlnet_checkpoint_to_pytorch(tf_checkpoint_path, bert_config_file, pytorch_dump_folder_path, finetuning_task=None):
    '''simple docstring'''
    config = XLNetConfig.from_json_file(bert_config_file)
    finetuning_task = finetuning_task.lower() if finetuning_task is not None else ""
    if finetuning_task in GLUE_TASKS_NUM_LABELS:
        print(f'Building PyTorch XLNetForSequenceClassification model from configuration: {config}')
        config.finetuning_task = finetuning_task
        config.num_labels = GLUE_TASKS_NUM_LABELS[finetuning_task]
        model = XLNetForSequenceClassification(config)
    elif "squad" in finetuning_task:
        config.finetuning_task = finetuning_task
        model = XLNetForQuestionAnswering(config)
    else:
        model = XLNetLMHeadModel(config)
    # Load weights from tf checkpoint
    load_tf_weights_in_xlnet(model, config, tf_checkpoint_path)
    # Save pytorch-model
    pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME)
    pytorch_config_dump_path = os.path.join(pytorch_dump_folder_path, CONFIG_NAME)
    print(f'Save PyTorch model to {os.path.abspath(pytorch_weights_dump_path)}')
    torch.save(model.state_dict(), pytorch_weights_dump_path)
    print(f'Save configuration file to {os.path.abspath(pytorch_config_dump_path)}')
    with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
        f.write(config.to_json_string())
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--xlnet_config_file''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained XLNet model. \n'''
'''This specifies the model architecture.'''
),
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default=None,
type=str,
required=True,
help='''Path to the folder to store the PyTorch model or dataset/vocab.''',
)
parser.add_argument(
'''--finetuning_task''',
default=None,
type=str,
help='''Name of a task on which the XLNet TensorFlow model was fine-tuned''',
)
args = parser.parse_args()
print(args)
convert_xlnet_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.xlnet_config_file, args.pytorch_dump_folder_path, args.finetuning_task
)
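# Example invocation (hypothetical local paths, shown for illustration only):
#
#   python convert_xlnet_original_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path ./xlnet_cased_L-12_H-768_A-12/xlnet_model.ckpt \
#       --xlnet_config_file ./xlnet_cased_L-12_H-768_A-12/xlnet_config.json \
#       --pytorch_dump_folder_path ./xlnet-pytorch \
#       --finetuning_task sts-b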
| 82
| 1
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_torch_available
from ...utils import OptionalDependencyNotAvailable
_import_structure = {
'''configuration_gpt_neox_japanese''': ['''GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''GPTNeoXJapaneseConfig'''],
'''tokenization_gpt_neox_japanese''': ['''GPTNeoXJapaneseTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_gpt_neox_japanese"] = [
'''GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''GPTNeoXJapaneseForCausalLM''',
'''GPTNeoXJapaneseLayer''',
'''GPTNeoXJapaneseModel''',
'''GPTNeoXJapanesePreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_gpt_neox_japanese import GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXJapaneseConfig
from .tokenization_gpt_neox_japanese import GPTNeoXJapaneseTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neox_japanese import (
GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoXJapaneseForCausalLM,
GPTNeoXJapaneseLayer,
GPTNeoXJapaneseModel,
GPTNeoXJapanesePreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
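# A sketch of what the lazy indirection buys (hypothetical usage, commented out
# because this file is a package __init__):
#
#   import transformers.models.gpt_neox_japanese as mod   # cheap: heavy deps not imported yet
#   cfg = mod.GPTNeoXJapaneseConfig()                      # first attribute access
#   # ^ only now does _LazyModule import configuration_gpt_neox_japanese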
| 599
|
'''simple docstring'''
import re
from pathlib import Path
from unittest import TestCase
import pytest
@pytest.mark.integration
class SCREAMING_SNAKE_CASE__(TestCase):
    def _no_encoding_on_file_open(self, filepath: str):
        """simple docstring"""
        with open(filepath, encoding="utf-8") as input_file:
            regexp = re.compile(r"(?!.*\b(?:encoding|rb|w|wb|w+|wb+|ab|ab+)\b)(?<=\s)(open)\((.*)\)")
            input_text = input_file.read()
            match = regexp.search(input_text)
        return match
    def _no_print_statements(self, filepath: str):
        """simple docstring"""
        with open(filepath, encoding="utf-8") as input_file:
            regexp = re.compile(r"#[^\r\n]*print\(|\"[^\r\n]*print\(|\"\"\".*?print\(.*?\"\"\"|(print\()", re.DOTALL)
            input_text = input_file.read()
            # use `re.finditer` to handle the case where the ignored groups would be matched first by `re.search`
            matches = regexp.finditer(input_text)
            matches = [match for match in matches if match is not None and match.group(1) is not None]
        return matches[0] if matches else None
    def test_no_encoding_on_file_open(self):
        """simple docstring"""
        dataset_paths = Path("./datasets")
        dataset_files = list(dataset_paths.absolute().glob("**/*.py"))
        for dataset in dataset_files:
            if self._no_encoding_on_file_open(str(dataset)):
                raise AssertionError(f'''open(...) must use utf-8 encoding in {dataset}''')
    def test_no_print_statements(self):
        """simple docstring"""
        dataset_paths = Path("./datasets")
        dataset_files = list(dataset_paths.absolute().glob("**/*.py"))
        for dataset in dataset_files:
            if self._no_print_statements(str(dataset)):
                raise AssertionError(f'''print statement found in {dataset}. Use datasets.logger/logging instead.''')
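# A quick sketch of what the first regex flags (hypothetical strings, for
# illustration): a bare `open(...)` with no mode/encoding keyword matches,
# while a call with an explicit encoding does not.
if __name__ == "__main__":
    _regexp = re.compile(r"(?!.*\b(?:encoding|rb|w|wb|w+|wb+|ab|ab+)\b)(?<=\s)(open)\((.*)\)")
    assert _regexp.search(" open(path)") is not None
    assert _regexp.search(" open(path, encoding='utf-8')") is None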
| 69
| 0
|
import inspect
import unittest
import warnings
from math import ceil, floor
from transformers import LevitConfig
from transformers.file_utils import cached_property, is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
MODEL_MAPPING,
LevitForImageClassification,
LevitForImageClassificationWithTeacher,
LevitModel,
)
from transformers.models.levit.modeling_levit import LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import LevitImageProcessor
class SCREAMING_SNAKE_CASE__ ( UpperCamelCase_ ):
"""simple docstring"""
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "hidden_sizes"))
        self.parent.assertTrue(hasattr(config, "num_attention_heads"))
class LevitModelTester:
    """simple docstring"""
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=64,
        num_channels=3,
        kernel_size=3,
        stride=2,
        padding=1,
        patch_size=16,
        hidden_sizes=[128, 256, 384],
        num_attention_heads=[4, 6, 8],
        depths=[2, 3, 4],
        key_dim=[16, 16, 16],
        drop_path_rate=0,
        mlp_ratio=[2, 2, 2],
        attention_ratio=[2, 2, 2],
        initializer_range=0.02,
        is_training=True,
        use_labels=True,
        num_labels=2,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.kernel_size = kernel_size
        self.stride = stride
        self.padding = padding
        self.hidden_sizes = hidden_sizes
        self.num_attention_heads = num_attention_heads
        self.depths = depths
        self.key_dim = key_dim
        self.drop_path_rate = drop_path_rate
        self.patch_size = patch_size
        self.attention_ratio = attention_ratio
        self.mlp_ratio = mlp_ratio
        self.initializer_range = initializer_range
        self.down_ops = [
            ["Subsample", key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2],
            ["Subsample", key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2],
        ]
        self.is_training = is_training
        self.use_labels = use_labels
        self.num_labels = num_labels
        self.initializer_range = initializer_range
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
        config = self.get_config()
        return config, pixel_values, labels
    def get_config(self):
return LevitConfig(
image_size=self.image_size , num_channels=self.num_channels , kernel_size=self.kernel_size , stride=self.stride , padding=self.padding , patch_size=self.patch_size , hidden_sizes=self.hidden_sizes , num_attention_heads=self.num_attention_heads , depths=self.depths , key_dim=self.key_dim , drop_path_rate=self.drop_path_rate , mlp_ratio=self.mlp_ratio , attention_ratio=self.attention_ratio , initializer_range=self.initializer_range , down_ops=self.down_ops , )
    def create_and_check_model(self, config, pixel_values, labels):
        model = LevitModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        image_size = (self.image_size, self.image_size)
        height, width = image_size[0], image_size[1]
        for _ in range(4):
            height = floor(((height + 2 * self.padding - self.kernel_size) / self.stride) + 1)
            width = floor(((width + 2 * self.padding - self.kernel_size) / self.stride) + 1)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, ceil(height / 4) * ceil(width / 4), self.hidden_sizes[-1]), )
    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = LevitForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class SCREAMING_SNAKE_CASE__ ( UpperCamelCase_ , UpperCamelCase_ , unittest.TestCase ):
"""simple docstring"""
a_ = (
(LevitModel, LevitForImageClassification, LevitForImageClassificationWithTeacher)
if is_torch_available()
else ()
)
a_ = (
{
"feature-extraction": LevitModel,
"image-classification": (LevitForImageClassification, LevitForImageClassificationWithTeacher),
}
if is_torch_available()
else {}
)
a_ = False
a_ = False
a_ = False
a_ = False
a_ = False
    def setUp(self):
        self.model_tester = LevitModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LevitConfig, has_text_modality=False, hidden_size=37)
    def test_config(self):
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
    def create_and_test_config_common_properties(self):
        return
@unittest.skip(reason="Levit does not use inputs_embeds" )
def _lowercase ( self : Dict ):
pass
@unittest.skip(reason="Levit does not support input and output embeddings" )
def _lowercase ( self : str ):
pass
@unittest.skip(reason="Levit does not output attentions" )
def _lowercase ( self : Optional[int] ):
pass
    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            hidden_states = outputs.hidden_states
            expected_num_layers = len(self.model_tester.depths) + 1
            self.assertEqual(len(hidden_states), expected_num_layers)
            image_size = (self.model_tester.image_size, self.model_tester.image_size)
            height, width = image_size[0], image_size[1]
            for _ in range(4):
                height = floor(
                    (
                        (height + 2 * self.model_tester.padding - self.model_tester.kernel_size)
                        / self.model_tester.stride
                    )
                    + 1)
                width = floor(
                    (
                        (width + 2 * self.model_tester.padding - self.model_tester.kernel_size)
                        / self.model_tester.stride
                    )
                    + 1)
            # verify the first hidden states (first block)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]), [
                    height * width,
                    self.model_tester.hidden_sizes[0],
                ], )
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
def _lowercase ( self : Tuple ):
pass
def _lowercase ( self : List[str] , __A : str , __A : Optional[int] , __A : Optional[int]=False ):
snake_case__ : Optional[Any] = super()._prepare_for_class(__A , __A , return_labels=__A )
if return_labels:
if model_class.__name__ == "LevitForImageClassificationWithTeacher":
del inputs_dict["labels"]
return inputs_dict
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)
def _lowercase ( self : List[Any] ):
if not self.model_tester.is_training:
return
snake_case__, snake_case__ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
snake_case__ : Union[str, Any] = True
for model_class in self.all_model_classes:
# LevitForImageClassificationWithTeacher supports inference-only
if (
model_class in get_values(__A )
or model_class.__name__ == "LevitForImageClassificationWithTeacher"
):
continue
snake_case__ : List[str] = model_class(__A )
model.to(__A )
model.train()
snake_case__ : Dict = self._prepare_for_class(__A , __A , return_labels=__A )
snake_case__ : Tuple = model(**__A ).loss
loss.backward()
def _lowercase ( self : Tuple ):
snake_case__, snake_case__ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
if not self.model_tester.is_training:
return
snake_case__ : str = False
snake_case__ : List[str] = True
for model_class in self.all_model_classes:
if model_class in get_values(__A ) or not model_class.supports_gradient_checkpointing:
continue
# LevitForImageClassificationWithTeacher supports inference-only
if model_class.__name__ == "LevitForImageClassificationWithTeacher":
continue
snake_case__ : Union[str, Any] = model_class(__A )
model.gradient_checkpointing_enable()
model.to(__A )
model.train()
snake_case__ : int = self._prepare_for_class(__A , __A , return_labels=__A )
snake_case__ : Tuple = model(**__A ).loss
loss.backward()
def _lowercase ( self : Dict ):
snake_case__, snake_case__ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
snake_case__ : Tuple = [
{"title": "multi_label_classification", "num_labels": 2, "dtype": torch.float},
{"title": "single_label_classification", "num_labels": 1, "dtype": torch.long},
{"title": "regression", "num_labels": 1, "dtype": torch.float},
]
for model_class in self.all_model_classes:
if (
model_class
not in [
*get_values(__A ),
]
or model_class.__name__ == "LevitForImageClassificationWithTeacher"
):
continue
for problem_type in problem_types:
with self.subTest(msg=f'''Testing {model_class} with {problem_type['title']}''' ):
snake_case__ : Optional[Any] = problem_type["title"]
snake_case__ : str = problem_type["num_labels"]
snake_case__ : int = model_class(__A )
model.to(__A )
model.train()
snake_case__ : Optional[int] = self._prepare_for_class(__A , __A , return_labels=__A )
if problem_type["num_labels"] > 1:
snake_case__ : Optional[int] = inputs["labels"].unsqueeze(1 ).repeat(1 , problem_type["num_labels"] )
snake_case__ : List[Any] = inputs["labels"].to(problem_type["dtype"] )
# This tests that we do not trigger the warning form PyTorch "Using a target size that is different
# to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure
# they have the same size." which is a symptom something in wrong for the regression problem.
# See https://github.com/huggingface/transformers/issues/11780
with warnings.catch_warnings(record=__A ) as warning_list:
snake_case__ : List[str] = model(**__A ).loss
for w in warning_list:
if "Using a target size that is different to the input size" in str(w.message ):
raise ValueError(
f'''Something is going wrong in the regression problem: intercepted {w.message}''' )
loss.backward()
@slow
    def test_model_from_pretrained(self):
        for model_name in LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = LevitModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
"""simple docstring"""
@cached_property
    def default_image_processor(self):
return LevitImageProcessor.from_pretrained(LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
@slow
    def test_inference_image_classification_head(self):
        model = LevitForImageClassificationWithTeacher.from_pretrained(LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[0]).to(
            torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([1.0448, -0.3745, -1.8317]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
| 25
|
import argparse
import fairseq
import torch
from transformers import UniSpeechSatConfig, UniSpeechSatForCTC, UniSpeechSatForPreTraining, logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
MAPPING = {
"""post_extract_proj""": """feature_projection.projection""",
"""encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""",
"""self_attn.k_proj""": """encoder.layers.*.attention.k_proj""",
"""self_attn.v_proj""": """encoder.layers.*.attention.v_proj""",
"""self_attn.q_proj""": """encoder.layers.*.attention.q_proj""",
"""self_attn.out_proj""": """encoder.layers.*.attention.out_proj""",
"""self_attn_layer_norm""": """encoder.layers.*.layer_norm""",
"""fc1""": """encoder.layers.*.feed_forward.intermediate_dense""",
"""fc2""": """encoder.layers.*.feed_forward.output_dense""",
"""final_layer_norm""": """encoder.layers.*.final_layer_norm""",
"""encoder.layer_norm""": """encoder.layer_norm""",
"""encoder.layer_norm_for_extract""": """layer_norm_for_extract""",
"""w2v_model.layer_norm""": """feature_projection.layer_norm""",
"""quantizer.weight_proj""": """quantizer.weight_proj""",
"""quantizer.vars""": """quantizer.codevectors""",
"""project_q""": """project_q""",
"""final_proj""": """project_hid""",
"""w2v_encoder.proj""": """lm_head""",
"""label_embs_concat""": """label_embeddings_concat""",
"""mask_emb""": """masked_spec_embed""",
"""spk_proj""": """speaker_proj""",
}
TOP_LEVEL_KEYS = [
"""lm_head""",
"""quantizer.weight_proj""",
"""quantizer.codevectors""",
"""project_q""",
"""project_hid""",
"""label_embeddings_concat""",
"""speaker_proj""",
"""layer_norm_for_extract""",
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)
    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape
    if hf_shape != value.shape:
        raise ValueError(
            F'''Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be'''
            F''' {value.shape} for {full_name}''')
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value
    logger.info(F'''{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.''')
def recursively_load_weights(fairseq_model, hf_model):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.unispeech_sat.feature_extractor
    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name, value, feature_extractor, unused_weights, hf_model.config.feat_extract_norm == "group", )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = "unispeech_sat." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    if "layer_norm_for_extract" in name and (".".join(name.split(".")[:-1]) != key):
                        # special case since naming is very similar
                        continue
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = "weight"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)
    logger.warning(F'''Unused weights: {unused_weights}''')
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])
    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    F'''{full_name} has size {value.shape}, but'''
                    F''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.''')
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''')
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    F'''{full_name} has size {value.shape}, but'''
                    F''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.''')
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''')
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    F'''{full_name} has size {value.shape}, but'''
                    F''' {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.''')
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(F'''Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.''')
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    F'''{full_name} has size {value.shape}, but'''
                    F''' {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.''')
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''')
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_unispeech_sat_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True):
    if config_path is not None:
        config = UniSpeechSatConfig.from_pretrained(config_path)
    else:
        config = UniSpeechSatConfig()
    dict_path = ""
    if is_finetuned:
        hf_wavavec = UniSpeechSatForCTC(config)
    else:
        hf_wavavec = UniSpeechSatForPreTraining(config)
    model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
        [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])})
    model = model[0].eval()
    recursively_load_weights(model, hf_wavavec)
    hf_wavavec.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument(
"""--not_finetuned""", action="""store_true""", help="""Whether the model to convert is a fine-tuned model or not"""
)
    args = parser.parse_args()
convert_unispeech_sat_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
| 25
| 1
|
import os
from pathlib import Path
from unittest.mock import patch
import pytest
import zstandard as zstd
from datasets.download.download_config import DownloadConfig
from datasets.utils.file_utils import (
OfflineModeIsEnabled,
cached_path,
fsspec_get,
fsspec_head,
ftp_get,
ftp_head,
get_from_cache,
http_get,
http_head,
)
FILE_CONTENT = """\
    Text data.
    Second line of data."""
FILE_PATH = "file"
@pytest.fixture(scope="""session""")
def zstd_path(tmp_path_factory):
    path = tmp_path_factory.mktemp("""data""") / (FILE_PATH + """.zstd""")
    data = bytes(FILE_CONTENT, """utf-8""")
    with zstd.open(path, """wb""") as f:
        f.write(data)
    return path
@pytest.fixture
def tmpfs_file(tmpfs):
    with open(os.path.join(tmpfs.local_root_dir, FILE_PATH), """w""") as f:
        f.write(FILE_CONTENT)
    return FILE_PATH
@pytest.mark.parametrize("""compression_format""", ["""gzip""", """xz""", """zstd"""])
def test_cached_path_extract(compression_format, gz_file, xz_file, zstd_path, tmp_path, text_file):
    input_paths = {"""gzip""": gz_file, """xz""": xz_file, """zstd""": zstd_path}
    input_path = input_paths[compression_format]
    cache_dir = tmp_path / """cache"""
    download_config = DownloadConfig(cache_dir=cache_dir, extract_compressed_file=True)
    extracted_path = cached_path(input_path, download_config=download_config)
    with open(extracted_path) as f:
        extracted_file_content = f.read()
    with open(text_file) as f:
        expected_file_content = f.read()
    assert extracted_file_content == expected_file_content
@pytest.mark.parametrize("""default_extracted""", [True, False])
@pytest.mark.parametrize("""default_cache_dir""", [True, False])
def test_extracted_datasets_path(default_extracted, default_cache_dir, xz_file, tmp_path, monkeypatch):
    custom_cache_dir = """custom_cache"""
    custom_extracted_dir = """custom_extracted_dir"""
    custom_extracted_path = tmp_path / """custom_extracted_path"""
    if default_extracted:
        expected = ("""downloads""" if default_cache_dir else custom_cache_dir, """extracted""")
    else:
        monkeypatch.setattr("""datasets.config.EXTRACTED_DATASETS_DIR""", custom_extracted_dir)
        monkeypatch.setattr("""datasets.config.EXTRACTED_DATASETS_PATH""", str(custom_extracted_path))
        expected = custom_extracted_path.parts[-2:] if default_cache_dir else (custom_cache_dir, custom_extracted_dir)
    filename = xz_file
    download_config = (
        DownloadConfig(extract_compressed_file=True)
        if default_cache_dir
        else DownloadConfig(cache_dir=tmp_path / custom_cache_dir, extract_compressed_file=True)
    )
    extracted_file_path = cached_path(filename, download_config=download_config)
    assert Path(extracted_file_path).parent.parts[-2:] == expected
def test_cached_path_local(text_file):
    # absolute path
    text_file_abs = str(Path(text_file).resolve())
    assert cached_path(text_file_abs) == text_file
    # relative path
    text_file_rel = str(Path(text_file).resolve().relative_to(Path(os.getcwd())))
    assert cached_path(text_file_rel) == text_file
def test_cached_path_missing_local(tmp_path):
    # absolute path
    missing_file = str(tmp_path.resolve() / """__missing_file__.txt""")
    with pytest.raises(FileNotFoundError):
        cached_path(missing_file)
    # relative path
    missing_file = """./__missing_file__.txt"""
    with pytest.raises(FileNotFoundError):
        cached_path(missing_file)
def test_get_from_cache_fsspec(tmpfs_file):
    output_file = get_from_cache(f"""tmp://{tmpfs_file}""")
    with open(output_file) as f:
        output_file_content = f.read()
    assert output_file_content == FILE_CONTENT
@patch("""datasets.config.HF_DATASETS_OFFLINE""", True)
def test_cached_path_offline():
    with pytest.raises(OfflineModeIsEnabled):
        cached_path("""https://huggingface.co""")
@patch("""datasets.config.HF_DATASETS_OFFLINE""", True)
def test_http_offline(tmp_path_factory):
    filename = tmp_path_factory.mktemp("""data""") / """file.html"""
    with pytest.raises(OfflineModeIsEnabled):
        http_get("""https://huggingface.co""", temp_file=filename)
    with pytest.raises(OfflineModeIsEnabled):
        http_head("""https://huggingface.co""")
@patch("""datasets.config.HF_DATASETS_OFFLINE""", True)
def test_ftp_offline(tmp_path_factory):
    filename = tmp_path_factory.mktemp("""data""") / """file.html"""
    with pytest.raises(OfflineModeIsEnabled):
        ftp_get("""ftp://huggingface.co""", temp_file=filename)
    with pytest.raises(OfflineModeIsEnabled):
        ftp_head("""ftp://huggingface.co""")
@patch("""datasets.config.HF_DATASETS_OFFLINE""", True)
def test_fsspec_offline(tmp_path_factory):
    filename = tmp_path_factory.mktemp("""data""") / """file.html"""
    with pytest.raises(OfflineModeIsEnabled):
        fsspec_get("""s3://huggingface.co""", temp_file=filename)
    with pytest.raises(OfflineModeIsEnabled):
        fsspec_head("""s3://huggingface.co""")
| 45
|
'''simple docstring'''
import os
import sys
import tempfile
import torch
from .state import AcceleratorState
from .utils import PrecisionType, PrepareForLaunch, is_mps_available, patch_environment
def notebook_launcher(function, args=(), num_processes=None, mixed_precision="no", use_port="29500"):
    '''simple docstring'''
    in_colab = False
    in_kaggle = False
    if any(key.startswith('''KAGGLE''') for key in os.environ.keys()):
        in_kaggle = True
    elif "IPython" in sys.modules:
        in_colab = '''google.colab''' in str(sys.modules['''IPython'''].get_ipython())
    try:
        mixed_precision = PrecisionType(mixed_precision.lower())
    except ValueError:
        raise ValueError(
            F'''Unknown mixed_precision mode: {mixed_precision.lower()}. Choose between {PrecisionType.list()}.''')
    if (in_colab or in_kaggle) and (os.environ.get('''TPU_NAME''', None) is not None):
        # TPU launch
        import torch_xla.distributed.xla_multiprocessing as xmp
        if len(AcceleratorState._shared_state) > 0:
            raise ValueError(
                '''To train on TPU in Colab or Kaggle Kernel, the `Accelerator` should only be initialized inside '''
                '''your training function. Restart your notebook and make sure no cells initializes an '''
                '''`Accelerator`.''')
        if num_processes is None:
            num_processes = 8
        launcher = PrepareForLaunch(function, distributed_type='''TPU''')
        print(F'''Launching a training on {num_processes} TPU cores.''')
        xmp.spawn(launcher, args=args, nprocs=num_processes, start_method='''fork''')
    elif in_colab:
        # No need for a distributed launch otherwise as it's either CPU or one GPU.
        if torch.cuda.is_available():
            print('''Launching training on one GPU.''')
        else:
            print('''Launching training on one CPU.''')
        function(*args)
    else:
        if num_processes is None:
            raise ValueError(
                '''You have to specify the number of GPUs you would like to use, add `num_processes=...` to your call.''')
        if num_processes > 1:
            # Multi-GPU launch
            from torch.multiprocessing import start_processes
            from torch.multiprocessing.spawn import ProcessRaisedException
            if len(AcceleratorState._shared_state) > 0:
                raise ValueError(
                    '''To launch a multi-GPU training from your notebook, the `Accelerator` should only be initialized '''
                    '''inside your training function. Restart your notebook and make sure no cells initializes an '''
                    '''`Accelerator`.''')
            if torch.cuda.is_initialized():
                raise ValueError(
                    '''To launch a multi-GPU training from your notebook, you need to avoid running any instruction '''
                    '''using `torch.cuda` in any cell. Restart your notebook and make sure no cells use any CUDA '''
                    '''function.''')
            # torch.distributed will expect a few environment variables to be here. We set the ones common to each
            # process here (the other ones will be set by the launcher).
            with patch_environment(
                world_size=num_processes, master_addr='''127.0.0.1''', master_port=use_port, mixed_precision=mixed_precision):
                launcher = PrepareForLaunch(function, distributed_type='''MULTI_GPU''')
                print(F'''Launching training on {num_processes} GPUs.''')
                try:
                    start_processes(launcher, args=args, nprocs=num_processes, start_method='''fork''')
                except ProcessRaisedException as e:
                    if "Cannot re-initialize CUDA in forked subprocess" in e.args[0]:
                        raise RuntimeError(
                            '''CUDA has been initialized before the `notebook_launcher` could create a forked subprocess. '''
                            '''This likely stems from an outside import causing issues once the `notebook_launcher()` is called. '''
                            '''Please review your imports and test them when running the `notebook_launcher()` to identify '''
                            '''which one is problematic.''') from e
        else:
            # No need for a distributed launch otherwise as it's either CPU, GPU or MPS.
            if is_mps_available():
                os.environ['''PYTORCH_ENABLE_MPS_FALLBACK'''] = '''1'''
                print('''Launching training on MPS.''')
            elif torch.cuda.is_available():
                print('''Launching training on one GPU.''')
            else:
                print('''Launching training on CPU.''')
            function(*args)
def __UpperCamelCase( _A : List[str] , _A : Optional[Any]=() , _A : str=2 ):
'''simple docstring'''
from torch.multiprocessing import start_processes
with tempfile.NamedTemporaryFile() as tmp_file:
# torch.distributed will expect a few environment variable to be here. We set the ones common to each
# process here (the other ones will be set be the launcher).
with patch_environment(
world_size=_A , master_addr='''127.0.01''' , master_port='''29500''' , accelerate_mixed_precision='''no''' , accelerate_debug_rdv_file=tmp_file.name , accelerate_use_cpu='''yes''' , ):
UpperCAmelCase__ : Optional[int] = PrepareForLaunch(_A , debug=_A )
start_processes(_A , args=_A , nprocs=_A , start_method='''fork''' )
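# Illustrative usage sketch (not part of the original file): `train_fn` is an
# assumed user-defined training function that builds its own `Accelerator`.
#
#     def train_fn(batch_size, learning_rate):
#         ...  # model, dataloaders, Accelerator(), training loop
#
#     notebook_launcher(train_fn, args=(64, 1e-3), num_processes=2, mixed_precision="fp16")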
| 614
| 0
|
'''simple docstring'''
from __future__ import annotations
from itertools import permutations
from random import randint
from timeit import repeat
def make_dataset() -> tuple[list[int], int]:
    arr = [randint(-1000, 1000) for _ in range(10)]
    r = randint(-5000, 5000)
    return (arr, r)


dataset = make_dataset()


def triplet_sum1(arr: list[int], target: int) -> tuple[int, ...]:
    """Naive approach: check every 3-permutation of the array, O(n**3)."""
    for triplet in permutations(arr, 3):
        if sum(triplet) == target:
            return tuple(sorted(triplet))
    return (0, 0, 0)


def triplet_sum2(arr: list[int], target: int) -> tuple[int, int, int]:
    """Optimized approach: sort once, then use two pointers, O(n**2)."""
    arr.sort()
    n = len(arr)
    for i in range(n - 1):
        left, right = i + 1, n - 1
        while left < right:
            if arr[i] + arr[left] + arr[right] == target:
                return (arr[i], arr[left], arr[right])
            elif arr[i] + arr[left] + arr[right] < target:
                left += 1
            elif arr[i] + arr[left] + arr[right] > target:
                right -= 1
    return (0, 0, 0)


def solution_times() -> tuple[float, float]:
    setup_code = "\nfrom __main__ import dataset, triplet_sum1, triplet_sum2\n"
    test_code1 = "\ntriplet_sum1(*dataset)\n"
    test_code2 = "\ntriplet_sum2(*dataset)\n"
    times1 = repeat(setup=setup_code, stmt=test_code1, repeat=5, number=10000)
    times2 = repeat(setup=setup_code, stmt=test_code2, repeat=5, number=10000)
    return (min(times1), min(times2))


if __name__ == "__main__":
    from doctest import testmod

    testmod()
    times = solution_times()
    print(f"The time for naive implementation is {times[0]}.")
    print(f"The time for optimized implementation is {times[1]}.")
| 719
|
'''simple docstring'''
from collections import OrderedDict
from typing import List, Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/efficientnet-b7": "https://huggingface.co/google/efficientnet-b7/resolve/main/config.json",
}
class EfficientNetConfig(PretrainedConfig):
    model_type = "efficientnet"

    def __init__(
        self,
        num_channels: int = 3,
        image_size: int = 600,
        width_coefficient: float = 2.0,
        depth_coefficient: float = 3.1,
        depth_divisor: int = 8,
        kernel_sizes: List[int] = [3, 3, 5, 3, 5, 5, 3],
        in_channels: List[int] = [32, 16, 24, 40, 80, 112, 192],
        out_channels: List[int] = [16, 24, 40, 80, 112, 192, 320],
        depthwise_padding: List[int] = [],
        strides: List[int] = [1, 2, 2, 2, 1, 2, 1],
        num_block_repeats: List[int] = [1, 2, 2, 3, 3, 4, 1],
        expand_ratios: List[int] = [1, 6, 6, 6, 6, 6, 6],
        squeeze_expansion_ratio: float = 0.25,
        hidden_act: str = "swish",
        hidden_dim: int = 2560,
        pooling_type: str = "mean",
        initializer_range: float = 0.02,
        batch_norm_eps: float = 0.001,
        batch_norm_momentum: float = 0.99,
        dropout_rate: float = 0.5,
        drop_connect_rate: float = 0.2,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        self.num_channels = num_channels
        self.image_size = image_size
        self.width_coefficient = width_coefficient
        self.depth_coefficient = depth_coefficient
        self.depth_divisor = depth_divisor
        self.kernel_sizes = kernel_sizes
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.depthwise_padding = depthwise_padding
        self.strides = strides
        self.num_block_repeats = num_block_repeats
        self.expand_ratios = expand_ratios
        self.squeeze_expansion_ratio = squeeze_expansion_ratio
        self.hidden_act = hidden_act
        self.hidden_dim = hidden_dim
        self.pooling_type = pooling_type
        self.initializer_range = initializer_range
        self.batch_norm_eps = batch_norm_eps
        self.batch_norm_momentum = batch_norm_momentum
        self.dropout_rate = dropout_rate
        self.drop_connect_rate = drop_connect_rate
        self.num_hidden_layers = sum(num_block_repeats) * 4


class EfficientNetOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-5
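# Illustrative usage (assuming a working `transformers` checkout): the defaults
# above describe EfficientNet-B7; smaller variants only change the scaling knobs.
#
#     config = EfficientNetConfig(width_coefficient=1.0, depth_coefficient=1.0, image_size=224)
#     onnx_config = EfficientNetOnnxConfig(config)
#     list(onnx_config.inputs)  # -> ['pixel_values']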
| 572
| 0
|
import pytest
import datasets
# Import fixture modules as plugins
pytest_plugins = ["tests.fixtures.files", "tests.fixtures.hub", "tests.fixtures.fsspec"]


def pytest_collection_modifyitems(config, items):
    # Mark tests as "unit" by default if not marked as "integration" (or already marked as "unit")
    for item in items:
        if any(marker in item.keywords for marker in ["integration", "unit"]):
            continue
        item.add_marker(pytest.mark.unit)


def pytest_configure(config):
    config.addinivalue_line("markers", "torchaudio_latest: mark test to run with torchaudio>=0.12")


@pytest.fixture(autouse=True)
def set_test_cache_config(tmp_path_factory, monkeypatch):
    # test_hf_cache_home = tmp_path_factory.mktemp("cache")  # TODO: why a cache dir per test function does not work?
    test_hf_cache_home = tmp_path_factory.getbasetemp() / "cache"
    test_hf_datasets_cache = test_hf_cache_home / "datasets"
    test_hf_metrics_cache = test_hf_cache_home / "metrics"
    test_hf_modules_cache = test_hf_cache_home / "modules"
    monkeypatch.setattr("datasets.config.HF_DATASETS_CACHE", str(test_hf_datasets_cache))
    monkeypatch.setattr("datasets.config.HF_METRICS_CACHE", str(test_hf_metrics_cache))
    monkeypatch.setattr("datasets.config.HF_MODULES_CACHE", str(test_hf_modules_cache))
    test_downloaded_datasets_path = test_hf_datasets_cache / "downloads"
    monkeypatch.setattr("datasets.config.DOWNLOADED_DATASETS_PATH", str(test_downloaded_datasets_path))
    test_extracted_datasets_path = test_hf_datasets_cache / "downloads" / "extracted"
    monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_PATH", str(test_extracted_datasets_path))


@pytest.fixture(autouse=True, scope="session")
def disable_tqdm_output():
    datasets.disable_progress_bar()


@pytest.fixture(autouse=True)
def set_update_download_counts_to_false(monkeypatch):
    # don't take tests into account when counting downloads
    monkeypatch.setattr("datasets.config.HF_UPDATE_DOWNLOAD_COUNTS", False)


@pytest.fixture
def set_sqlalchemy_silence_uber_warning(monkeypatch):
    # Required to suppress RemovedIn20Warning when feature(s) are not compatible with SQLAlchemy 2.0
    # To be removed once SQLAlchemy 2.0 supported
    monkeypatch.setattr("sqlalchemy.util.deprecations.SILENCE_UBER_WARNING", True)
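# Illustrative effect of the fixtures above: every test in the session sees
# per-run cache paths instead of the user's real Hugging Face cache, e.g.
#
#     datasets.config.HF_DATASETS_CACHE  # -> <pytest basetemp>/cache/datasets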
| 416
|
from typing import Any
class Node:
    def __init__(self, data: Any) -> None:
        self.data = data
        self.next = None

    def __repr__(self) -> str:
        return f"Node({self.data})"


class LinkedList:
    def __init__(self) -> None:
        self.head = None

    def __iter__(self) -> Any:
        node = self.head
        while node:
            yield node.data
            node = node.next

    def __len__(self) -> int:
        return sum(1 for _ in self)

    def __repr__(self) -> str:
        return "->".join([str(item) for item in self])

    def __getitem__(self, index: int) -> Any:
        if not 0 <= index < len(self):
            raise ValueError("list index out of range.")
        for i, node in enumerate(self):
            if i == index:
                return node
        return None

    def __setitem__(self, index: int, data: Any) -> None:
        if not 0 <= index < len(self):
            raise ValueError("list index out of range.")
        current = self.head
        for _ in range(index):
            current = current.next
        current.data = data

    def insert_tail(self, data: Any) -> None:
        self.insert_nth(len(self), data)

    def insert_head(self, data: Any) -> None:
        self.insert_nth(0, data)

    def insert_nth(self, index: int, data: Any) -> None:
        if not 0 <= index <= len(self):
            raise IndexError("list index out of range")
        new_node = Node(data)
        if self.head is None:
            self.head = new_node
        elif index == 0:
            new_node.next = self.head  # link new_node to head
            self.head = new_node
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            new_node.next = temp.next
            temp.next = new_node

    def print_list(self) -> None:  # print every node data
        print(self)

    def delete_head(self) -> Any:
        return self.delete_nth(0)

    def delete_tail(self) -> Any:  # delete from tail
        return self.delete_nth(len(self) - 1)

    def delete_nth(self, index: int = 0) -> Any:
        if not 0 <= index <= len(self) - 1:  # test if index is valid
            raise IndexError("List index out of range.")
        delete_node = self.head  # default first node
        if index == 0:
            self.head = self.head.next
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            delete_node = temp.next
            temp.next = temp.next.next
        return delete_node.data

    def is_empty(self) -> bool:
        return self.head is None

    def reverse(self) -> None:
        prev = None
        current = self.head
        while current:
            # Store the current node's next node.
            next_node = current.next
            # Make the current node's next point backwards
            current.next = prev
            # Make the previous node be the current node
            prev = current
            # Make the current node the next node (to progress iteration)
            current = next_node
        # Return prev in order to put the head at the end
        self.head = prev
def test_singly_linked_list() -> None:
    linked_list = LinkedList()
    assert linked_list.is_empty() is True
    assert str(linked_list) == ""

    try:
        linked_list.delete_head()
        raise AssertionError  # This should not happen.
    except IndexError:
        assert True  # This should happen.

    try:
        linked_list.delete_tail()
        raise AssertionError  # This should not happen.
    except IndexError:
        assert True  # This should happen.

    for i in range(10):
        assert len(linked_list) == i
        linked_list.insert_nth(i, i + 1)
    assert str(linked_list) == "->".join(str(i) for i in range(1, 11))

    linked_list.insert_head(0)
    linked_list.insert_tail(11)
    assert str(linked_list) == "->".join(str(i) for i in range(0, 12))

    assert linked_list.delete_head() == 0
    assert linked_list.delete_nth(9) == 10
    assert linked_list.delete_tail() == 11
    assert len(linked_list) == 9
    assert str(linked_list) == "->".join(str(i) for i in range(1, 10))

    assert all(linked_list[i] == i + 1 for i in range(0, 9)) is True

    for i in range(0, 9):
        linked_list[i] = -i
    assert all(linked_list[i] == -i for i in range(0, 9)) is True

    linked_list.reverse()
    assert str(linked_list) == "->".join(str(i) for i in range(-8, 1))
def test_singly_linked_list_2() -> None:
    # This section tests that the linked list can hold a mix of data types.
    test_input = [
        -9,
        100,
        Node(77345112),
        "dlrow olleH",
        7,
        5555,
        0,
        -192.55555,
        "Hello, world!",
        77.9,
        Node(10),
        None,
        None,
        12.20,
    ]
    linked_list = LinkedList()

    for i in test_input:
        linked_list.insert_tail(i)

    # Check if it's empty or not
    assert linked_list.is_empty() is False
    assert (
        str(linked_list) == "-9->100->Node(77345112)->dlrow olleH->7->5555->0->"
        "-192.55555->Hello, world!->77.9->Node(10)->None->None->12.2"
    )

    # Delete the head
    result = linked_list.delete_head()
    assert result == -9
    assert (
        str(linked_list) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None->None->12.2"
    )

    # Delete the tail
    result = linked_list.delete_tail()
    assert result == 12.2
    assert (
        str(linked_list) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None->None"
    )

    # Delete a node in specific location in linked list
    result = linked_list.delete_nth(10)
    assert result is None
    assert (
        str(linked_list) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None"
    )

    # Add a Node instance to its head
    linked_list.insert_head(Node("Hello again, world!"))
    assert (
        str(linked_list)
        == "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
        "7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None"
    )

    # Add None to its tail
    linked_list.insert_tail(None)
    assert (
        str(linked_list)
        == "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
        "7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None->None"
    )

    # Reverse the linked list
    linked_list.reverse()
    assert (
        str(linked_list)
        == "None->None->Node(10)->77.9->Hello, world!->-192.55555->0->5555->"
        "7->dlrow olleH->Node(77345112)->100->Node(Hello again, world!)"
    )


def main() -> None:
    from doctest import testmod

    testmod()

    linked_list = LinkedList()
    linked_list.insert_head(input("Inserting 1st at head ").strip())
    linked_list.insert_head(input("Inserting 2nd at head ").strip())
    print("\nPrint list:")
    linked_list.print_list()
    linked_list.insert_tail(input("\nInserting 1st at tail ").strip())
    linked_list.insert_tail(input("Inserting 2nd at tail ").strip())
    print("\nPrint list:")
    linked_list.print_list()
    print("\nDelete head")
    linked_list.delete_head()
    print("Delete tail")
    linked_list.delete_tail()
    print("\nPrint list:")
    linked_list.print_list()
    print("\nReverse linked list")
    linked_list.reverse()
    print("\nPrint list:")
    linked_list.print_list()
    print("\nString representation of linked list:")
    print(linked_list)
    print("\nReading/changing Node data using indexing:")
    print(f"Element at Position 1: {linked_list[1]}")
    linked_list[1] = input("Enter New Value: ").strip()
    print("New list:")
    print(linked_list)
    print(f"length of linked_list is : {len(linked_list)}")


if __name__ == "__main__":
    main()
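# Minimal illustrative session with the class above:
#
#     lst = LinkedList()
#     lst.insert_tail(1); lst.insert_tail(2); lst.insert_head(0)
#     str(lst)       # -> '0->1->2'
#     lst.reverse()
#     str(lst)       # -> '2->1->0'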
| 416
| 1
|
import numpy as np
def sigmoid(vector: np.ndarray) -> np.ndarray:
    return 1 / (1 + np.exp(-vector))


def swish(vector: np.ndarray) -> np.ndarray:
    # Swish / SiLU activation: x * sigmoid(x)
    return vector * sigmoid(vector)
if __name__ == "__main__":
import doctest
doctest.testmod()
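# Illustrative values: swish(0) = 0, and swish(x) approaches x for large x.
#
#     sigmoid(np.array([0.0]))      # -> array([0.5])
#     swish(np.array([0.0, 10.0]))  # -> array([0.        , 9.99954602])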
| 198
|
def solution(n: int = 1000) -> int:
    """Return the index of the first Fibonacci number with `n` digits."""
    f1, f2 = 1, 1
    index = 2
    while True:
        i = 0
        f = f1 + f2
        f1, f2 = f2, f
        index += 1
        for _ in str(f):
            i += 1
        if i == n:
            break
    return index
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
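# Sanity check (illustrative): the first Fibonacci number with 3 digits is
# F(12) = 144, so solution(3) == 12.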
| 198
| 1
|
import argparse
import random
import joblib
import numpy as np
import torch
from igf.igf import (
SecondaryLearner,
collect_objective_set,
compute_perplexity,
generate_datasets,
load_gpta,
recopy_gpta,
set_seed,
train_secondary_learner,
)
from torch.utils.data import DataLoader, RandomSampler
from transformers import GPTaLMHeadModel
def _snake_case ( lowerCAmelCase : Tuple=3_2 , lowerCAmelCase : Optional[Any]=1_0 , lowerCAmelCase : Optional[int]=1_0_0 , lowerCAmelCase : str=1_0_2_6 , lowerCAmelCase : List[str]=True , lowerCAmelCase : int="data/tokenized_stories_train_wikitext103.jbl" , lowerCAmelCase : Any="igf_context_pairs.jbl" , ):
"""simple docstring"""
set_seed(3 )
# generate train_data and objective_set
SCREAMING_SNAKE_CASE_ : Tuple = generate_datasets(
__a , __a , number=__a , min_len=1_0_2_6 , trim=__a )
# keeps model same across runs
set_seed(4 )
# model, lm_optimizer, lm_scheduler = recopy_gpt2(model, device, max_steps) # store original model weights
# can we train on GPU?
SCREAMING_SNAKE_CASE_ : List[Any] = torch.device("cuda:0" if torch.cuda.is_available() else "cpu" )
# load pretrained model
SCREAMING_SNAKE_CASE_ : List[Any] = load_gpta("gpt2" ).to(__a )
print("computing perplexity on objective set" )
SCREAMING_SNAKE_CASE_ : Tuple = compute_perplexity(__a , __a , __a ).item()
print("perplexity on objective set:" , __a )
# collect igf pairs and save to file demo.jbl
collect_objective_set(__a , __a , __a , __a , __a , __a , __a , __a )
# clean up, delete model and data we don't need anymore
del model, train_data, objective_set
torch.cuda.empty_cache()
def _snake_case ( lowerCAmelCase : Any , lowerCAmelCase : List[Any]=1_5 , lowerCAmelCase : Any=1_2_8 , lowerCAmelCase : Dict=1_0_0 , lowerCAmelCase : List[str]="igf_model.pt" , ):
"""simple docstring"""
set_seed(4_2 )
# Load pre-trained model
SCREAMING_SNAKE_CASE_ : int = GPTaLMHeadModel.from_pretrained("gpt2" )
# Initialize secondary learner to use embedding weights of model
SCREAMING_SNAKE_CASE_ : str = SecondaryLearner(__a )
# Train secondary learner
SCREAMING_SNAKE_CASE_ : List[Any] = train_secondary_learner(
__a , __a , max_epochs=__a , batch_size=__a , eval_freq=1_0_0 , igf_model_path=__a , )
del model, secondary_learner_train_data
torch.cuda.empty_cache()
return secondary_learner
def _snake_case ( lowerCAmelCase : List[str] , lowerCAmelCase : List[Any] , lowerCAmelCase : Optional[int] , lowerCAmelCase : Union[str, Any]=3_2 , lowerCAmelCase : Any=1_0_0_0 , lowerCAmelCase : Optional[Any]=1_6 , lowerCAmelCase : Dict=1.0 , lowerCAmelCase : Tuple=recopy_gpta , lowerCAmelCase : int=None , lowerCAmelCase : Optional[Any]=1_0 , lowerCAmelCase : Dict="gpt2_finetuned.pt" , ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Tuple = torch.device("cuda:0" if torch.cuda.is_available() else "cpu" )
SCREAMING_SNAKE_CASE_ : List[str] = RandomSampler(__a )
SCREAMING_SNAKE_CASE_ : Any = DataLoader(__a , sampler=__a )
SCREAMING_SNAKE_CASE_ : List[Any] = max_steps // (len(__a )) + 1
SCREAMING_SNAKE_CASE_ : List[Any] = 0
SCREAMING_SNAKE_CASE_ : int = torch.zeros((1, context_len) , dtype=torch.long , device=__a )
SCREAMING_SNAKE_CASE_ : List[Any] = recopy_model(__a , __a , __a )
model.train()
if secondary_learner is not None:
secondary_learner.to(__a )
secondary_learner.eval()
SCREAMING_SNAKE_CASE_ : int = []
SCREAMING_SNAKE_CASE_ : List[Any] = 0
SCREAMING_SNAKE_CASE_ : Union[str, Any] = []
SCREAMING_SNAKE_CASE_ : str = []
# Compute the performance of the transformer model at the beginning
SCREAMING_SNAKE_CASE_ : int = compute_perplexity(__a , __a , __a )
test_perps.append(__a )
print("Test perplexity, step" , __a , ":" , __a )
for epoch in range(int(__a ) ):
for step, example in enumerate(__a ):
torch.cuda.empty_cache()
SCREAMING_SNAKE_CASE_ : Union[str, Any] = random.randint(0 , example.size(2 ) - context_len - 1 )
SCREAMING_SNAKE_CASE_ : Tuple = example[0, 0, start : start + context_len]
lm_optimizer.zero_grad()
SCREAMING_SNAKE_CASE_ : List[Any] = model(__a , labels=__a )
SCREAMING_SNAKE_CASE_ : Any = True
if secondary_learner is not None:
SCREAMING_SNAKE_CASE_ : Dict = secondary_learner.forward(
torch.tensor(__a , dtype=torch.long , device=__a ).unsqueeze(0 ) )[0].item()
observed_qs.append(float(__a ) )
# Here we implement the simple non-constant threshold for the predicted IG(X) value
# We will decay the selectivity of our secondary learner filter from
# 1 standard deviation above average to 1 below average after 10 batches.
if global_step == 1_0:
SCREAMING_SNAKE_CASE_ : Optional[Any] = -1
if predicted_q < threshold:
SCREAMING_SNAKE_CASE_ : Tuple = False
# If we passed the filter, add the context to the batch!
if do_backprop:
contexts.append(np.array(context.cpu() ) )
SCREAMING_SNAKE_CASE_ : Any = outputs[0]
lm_loss.backward()
examples += 1
del outputs
# Once the batch is filled with enough contexts, backprop on the batch.
if examples == batch_size:
torch.cuda.empty_cache()
SCREAMING_SNAKE_CASE_ : Optional[int] = 0
# Do LM backprop
torch.nn.utils.clip_grad_norm_(model.parameters() , 3.0 )
lm_optimizer.step()
lm_scheduler.step() # Update learning rate schedule
global_step += 1
# Compute the performance of the transformer model at this batch
if global_step % eval_interval == 0:
SCREAMING_SNAKE_CASE_ : List[str] = compute_perplexity(__a , __a , __a )
test_perps.append(__a )
print("Test perplexity, step" , __a , ":" , __a )
# Break out of the loop after 60 batches
if max_steps > 0 and global_step > 6_0:
break
if max_steps > 0 and global_step > 6_0:
break
# save finetuned transformer model
torch.save(model.state_dict() , __a )
torch.cuda.empty_cache()
# Do some cleaning up so we can reinitialize for the next run of this function
del lm_optimizer
del lm_scheduler
return model
def _snake_case ( ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Any = argparse.ArgumentParser(description="Fine-tune a transformer model with IGF on a language modeling task" )
# Required parameters
parser.add_argument(
"--data_dir" , default=__a , type=__a , required=__a , help="The input data dir. Should contain data files for WikiText." , )
parser.add_argument(
"--model_name_or_path" , default=__a , type=__a , required=__a , help="Path to pretrained model or model identifier from huggingface.co/models" , )
parser.add_argument(
"--data_file" , type=__a , default=__a , help=(
"A jbl file containing tokenized data which can be split as objective dataset, "
"train_dataset and test_dataset."
) , )
parser.add_argument(
"--igf_data_file" , type=__a , default=__a , help="A jbl file containing the context and information gain pairs to train secondary learner." , )
parser.add_argument(
"--output_dir" , default=__a , type=__a , required=__a , help="The output directory where the final fine-tuned model is stored." , )
parser.add_argument(
"--tokenizer_name" , default=__a , type=__a , help="Pretrained tokenizer name or path if not the same as model_name" , )
parser.add_argument("--seed" , type=__a , default=__a , help="A seed for reproducible training." )
parser.add_argument(
"--context_len" , default=3_2 , type=__a , help=(
"The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
) , )
parser.add_argument(
"--size_objective_set" , default=1_0_0 , type=__a , help="number of articles that are long enough to be used as our objective set" , )
parser.add_argument(
"--eval_freq" , default=1_0_0 , type=__a , help="secondary model evaluation is triggered at eval_freq" )
parser.add_argument("--max_steps" , default=1_0_0_0 , type=__a , help="To calculate training epochs" )
parser.add_argument(
"--secondary_learner_batch_size" , default=1_2_8 , type=__a , help="batch size of training data for secondary learner" , )
parser.add_argument(
"--batch_size" , default=1_6 , type=__a , help="batch size of training data of language model(gpt2) " )
parser.add_argument(
"--eval_interval" , default=1_0 , type=__a , help=(
"decay the selectivity of our secondary learner filter from"
"1 standard deviation above average to 1 below average after 10 batches"
) , )
parser.add_argument(
"--number" , default=1_0_0 , type=__a , help="The number of examples split to be used as objective_set/test_data" )
parser.add_argument(
"--min_len" , default=1_0_2_6 , type=__a , help="The minimum length of the article to be used as objective set" )
parser.add_argument(
"--secondary_learner_max_epochs" , default=1_5 , type=__a , help="number of epochs to train secondary learner" )
parser.add_argument("--trim" , default=__a , type=__a , help="truncate the example if it exceeds context length" )
parser.add_argument(
"--threshold" , default=1.0 , type=__a , help=(
"The threshold value used by secondary learner to filter the train_data and allow only"
" informative data as input to the model"
) , )
parser.add_argument("--finetuned_model_name" , default="gpt2_finetuned.pt" , type=__a , help="finetuned_model_name" )
parser.add_argument(
"--recopy_model" , default=__a , type=__a , help="Reset the model to the original pretrained GPT-2 weights after each iteration" , )
# function calls
# Collecting *n* pairs of context and information gain(X, IG(X)) for training the secondary learner
generate_n_pairs(
context_len=3_2 , max_steps=1_0 , size_objective_set=1_0_0 , min_len=1_0_2_6 , trim=__a , data_file="data/tokenized_stories_train_wikitext103.jbl" , igf_data_file="igf_context_pairs.jbl" , )
# Load train data for secondary learner
SCREAMING_SNAKE_CASE_ : List[str] = joblib.load("data/IGF_values.jbl" )
# Train secondary learner
SCREAMING_SNAKE_CASE_ : Optional[Any] = training_secondary_learner(
__a , secondary_learner_max_epochs=1_5 , secondary_learner_batch_size=1_2_8 , eval_freq=1_0_0 , igf_model_path="igf_model.pt" , )
# load pretrained gpt2 model
SCREAMING_SNAKE_CASE_ : Tuple = GPTaLMHeadModel.from_pretrained("gpt2" )
set_seed(4_2 )
# Generate train and test data to train and evaluate gpt2 model
SCREAMING_SNAKE_CASE_ : Optional[Any] = generate_datasets(
context_len=3_2 , file="data/tokenized_stories_train_wikitext103.jbl" , number=1_0_0 , min_len=1_0_2_6 , trim=__a )
# fine-tuning of the gpt2 model using igf (Information Gain Filtration)
finetune(
__a , __a , __a , context_len=3_2 , max_steps=1_0_0_0 , batch_size=1_6 , threshold=1.0 , recopy_model=__a , secondary_learner=__a , eval_interval=1_0 , finetuned_model_name="gpt2_finetuned.pt" , )
if __name__ == "__main__":
main()
| 216
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
    "configuration_pix2struct": [
        "PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "Pix2StructConfig",
        "Pix2StructTextConfig",
        "Pix2StructVisionConfig",
    ],
    "processing_pix2struct": ["Pix2StructProcessor"],
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["image_processing_pix2struct"] = ["Pix2StructImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_pix2struct"] = [
        "PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Pix2StructPreTrainedModel",
        "Pix2StructForConditionalGeneration",
        "Pix2StructVisionModel",
        "Pix2StructTextModel",
    ]

if TYPE_CHECKING:
    from .configuration_pix2struct import (
        PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Pix2StructConfig,
        Pix2StructTextConfig,
        Pix2StructVisionConfig,
    )
    from .processing_pix2struct import Pix2StructProcessor

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .image_processing_pix2struct import Pix2StructImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_pix2struct import (
            PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST,
            Pix2StructForConditionalGeneration,
            Pix2StructPreTrainedModel,
            Pix2StructTextModel,
            Pix2StructVisionModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
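# Illustrative effect (assuming the usual transformers package layout): the
# `_LazyModule` defers the heavy torch import until an attribute is accessed,
# e.g. `from transformers import Pix2StructConfig` resolves through the
# `_import_structure` mapping above.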
| 59
| 0
|
import os
import sys
import tempfile
import torch
from .state import AcceleratorState
from .utils import PrecisionType, PrepareForLaunch, is_mps_available, patch_environment
def A ( _lowerCamelCase , _lowerCamelCase=() , _lowerCamelCase=None , _lowerCamelCase="no" , _lowerCamelCase="29500" ):
'''simple docstring'''
_lowerCAmelCase : Tuple = False
_lowerCAmelCase : Union[str, Any] = False
if any(key.startswith("KAGGLE" ) for key in os.environ.keys() ):
_lowerCAmelCase : Dict = True
elif "IPython" in sys.modules:
_lowerCAmelCase : Dict = 'google.colab' in str(sys.modules["IPython"].get_ipython() )
try:
_lowerCAmelCase : Tuple = PrecisionType(mixed_precision.lower() )
except ValueError:
raise ValueError(
F"Unknown mixed_precision mode: {args.mixed_precision.lower()}. Choose between {PrecisionType.list()}." )
if (in_colab or in_kaggle) and (os.environ.get("TPU_NAME" , __lowercase ) is not None):
# TPU launch
import torch_xla.distributed.xla_multiprocessing as xmp
if len(AcceleratorState._shared_state ) > 0:
raise ValueError(
"To train on TPU in Colab or Kaggle Kernel, the `Accelerator` should only be initialized inside "
"your training function. Restart your notebook and make sure no cells initializes an "
"`Accelerator`." )
if num_processes is None:
_lowerCAmelCase : List[Any] = 8
_lowerCAmelCase : Optional[int] = PrepareForLaunch(__lowercase , distributed_type="TPU" )
print(F"Launching a training on {num_processes} TPU cores." )
xmp.spawn(__lowercase , args=__lowercase , nprocs=__lowercase , start_method="fork" )
elif in_colab:
# No need for a distributed launch otherwise as it's either CPU or one GPU.
if torch.cuda.is_available():
print("Launching training on one GPU." )
else:
print("Launching training on one CPU." )
function(*__lowercase )
else:
if num_processes is None:
raise ValueError(
"You have to specify the number of GPUs you would like to use, add `num_processes=...` to your call." )
if num_processes > 1:
# Multi-GPU launch
from torch.multiprocessing import start_processes
from torch.multiprocessing.spawn import ProcessRaisedException
if len(AcceleratorState._shared_state ) > 0:
raise ValueError(
"To launch a multi-GPU training from your notebook, the `Accelerator` should only be initialized "
"inside your training function. Restart your notebook and make sure no cells initializes an "
"`Accelerator`." )
if torch.cuda.is_initialized():
raise ValueError(
"To launch a multi-GPU training from your notebook, you need to avoid running any instruction "
"using `torch.cuda` in any cell. Restart your notebook and make sure no cells use any CUDA "
"function." )
            # torch.distributed will expect a few environment variables to be here. We set the ones common to each
            # process here (the other ones will be set by the launcher).
            with patch_environment(
                world_size=__lowercase , master_addr="127.0.0.1" , master_port=__lowercase , mixed_precision=__lowercase ):
_lowerCAmelCase : Tuple = PrepareForLaunch(__lowercase , distributed_type="MULTI_GPU" )
print(F"Launching training on {num_processes} GPUs." )
try:
start_processes(__lowercase , args=__lowercase , nprocs=__lowercase , start_method="fork" )
except ProcessRaisedException as e:
if "Cannot re-initialize CUDA in forked subprocess" in e.args[0]:
raise RuntimeError(
"CUDA has been initialized before the `notebook_launcher` could create a forked subprocess. "
"This likely stems from an outside import causing issues once the `notebook_launcher()` is called. "
"Please review your imports and test them when running the `notebook_launcher()` to identify "
"which one is problematic." ) from e
else:
# No need for a distributed launch otherwise as it's either CPU, GPU or MPS.
if is_mps_available():
_lowerCAmelCase : Optional[Any] = '1'
print("Launching training on MPS." )
elif torch.cuda.is_available():
print("Launching training on one GPU." )
else:
print("Launching training on CPU." )
function(*__lowercase )
def A ( _lowerCamelCase , _lowerCamelCase=() , _lowerCamelCase=2 ):
'''simple docstring'''
from torch.multiprocessing import start_processes
with tempfile.NamedTemporaryFile() as tmp_file:
        # torch.distributed will expect a few environment variables to be here. We set the ones common to each
        # process here (the other ones will be set by the launcher).
        with patch_environment(
            world_size=__lowercase , master_addr="127.0.0.1" , master_port="29500" , accelerate_mixed_precision="no" , accelerate_debug_rdv_file=tmp_file.name , accelerate_use_cpu="yes" , ):
_lowerCAmelCase : Any = PrepareForLaunch(__lowercase , debug=__lowercase )
start_processes(__lowercase , args=__lowercase , nprocs=__lowercase , start_method="fork" )
| 704
|
import requests
from bs4 import BeautifulSoup


def get_citation(base_url: str, params: dict) -> str:
    soup = BeautifulSoup(requests.get(base_url, params=params).content, "html.parser")
    div = soup.find("div", attrs={"class": "gs_ri"})
    anchors = div.find("div", attrs={"class": "gs_fl"}).find_all("a")
    return anchors[2].get_text()


if __name__ == "__main__":
    params = {
        "title": (
            "Precisely geometry controlled microsupercapacitors for ultrahigh areal "
            "capacitance, volumetric capacitance, and energy density"
        ),
        "journal": "Chem. Mater.",
        "volume": 30,
        "pages": "3979-3990",
        "year": 2018,
        "hl": "en",
    }
    print(get_citation("https://scholar.google.com/scholar_lookup", params=params))
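# Note (illustrative): Google Scholar may throttle or block scripted requests;
# a more polite variant sends a browser-like User-Agent header, e.g.
#
#     requests.get(base_url, params=params, headers={"User-Agent": "Mozilla/5.0"})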
| 658
| 0
|
import os
from glob import glob
import imageio
import torch
import torchvision
import wandb
from img_processing import custom_to_pil, loop_post_process, preprocess, preprocess_vqgan
from loaders import load_vqgan
from PIL import Image
from torch import nn
from transformers import CLIPModel, CLIPTokenizerFast
from utils import get_device, get_timestamp, show_pil
class a_ :
def __init__( self : str , UpperCAmelCase__ : str = "cpu" , UpperCAmelCase__ : str = "openai/clip-vit-large-patch14" ):
"""simple docstring"""
snake_case : Optional[Any] = device
snake_case : int = CLIPTokenizerFast.from_pretrained(UpperCAmelCase__ )
snake_case : Union[str, Any] = [0.4814_5466, 0.457_8275, 0.4082_1073]
snake_case : Optional[Any] = [0.2686_2954, 0.2613_0258, 0.2757_7711]
snake_case : Tuple = torchvision.transforms.Normalize(self.image_mean , self.image_std )
snake_case : Union[str, Any] = torchvision.transforms.Resize(224 )
snake_case : int = torchvision.transforms.CenterCrop(224 )
def lowerCAmelCase( self : Optional[int] , UpperCAmelCase__ : str ):
"""simple docstring"""
snake_case : Any = self.resize(UpperCAmelCase__ )
snake_case : Dict = self.center_crop(UpperCAmelCase__ )
snake_case : Optional[Any] = self.normalize(UpperCAmelCase__ )
return images
def __call__( self : List[str] , UpperCAmelCase__ : List[Any]=None , UpperCAmelCase__ : str=None , **UpperCAmelCase__ : Union[str, Any] ):
"""simple docstring"""
snake_case : Tuple = self.tokenizer(text=UpperCAmelCase__ , **UpperCAmelCase__ )
snake_case : Dict = self.preprocess_img(UpperCAmelCase__ )
snake_case : str = {key: value.to(self.device ) for (key, value) in encoding.items()}
return encoding
class a_ ( nn.Module ):
def __init__( self : str , UpperCAmelCase__ : List[str]=10 , UpperCAmelCase__ : Optional[Any]=0.01 , UpperCAmelCase__ : List[str]=None , UpperCAmelCase__ : Optional[Any]=None , UpperCAmelCase__ : Union[str, Any]=None , UpperCAmelCase__ : Union[str, Any]=None , UpperCAmelCase__ : int=None , UpperCAmelCase__ : Any=None , UpperCAmelCase__ : Dict=False , UpperCAmelCase__ : List[Any]=True , UpperCAmelCase__ : str="image" , UpperCAmelCase__ : List[str]=True , UpperCAmelCase__ : Any=False , UpperCAmelCase__ : Optional[int]=False , UpperCAmelCase__ : str=False , ):
"""simple docstring"""
super().__init__()
snake_case : Optional[int] = None
snake_case : List[Any] = device if device else get_device()
if vqgan:
snake_case : Any = vqgan
else:
snake_case : Any = load_vqgan(self.device , conf_path=UpperCAmelCase__ , ckpt_path=UpperCAmelCase__ )
self.vqgan.eval()
if clip:
snake_case : Tuple = clip
else:
snake_case : int = CLIPModel.from_pretrained('''openai/clip-vit-base-patch32''' )
self.clip.to(self.device )
snake_case : Union[str, Any] = ProcessorGradientFlow(device=self.device )
snake_case : Optional[int] = iterations
snake_case : List[Any] = lr
snake_case : str = log
snake_case : str = make_grid
snake_case : Tuple = return_val
snake_case : List[str] = quantize
snake_case : Any = self.vqgan.decoder.z_shape
def lowerCAmelCase( self : str , UpperCAmelCase__ : Optional[Any]=None , UpperCAmelCase__ : str=None , UpperCAmelCase__ : Tuple=5 , UpperCAmelCase__ : Union[str, Any]=True ):
"""simple docstring"""
snake_case : List[str] = []
if output_path is None:
snake_case : Any = '''./animation.gif'''
if input_path is None:
snake_case : List[str] = self.save_path
snake_case : Union[str, Any] = sorted(glob(input_path + '''/*''' ) )
if not len(UpperCAmelCase__ ):
raise ValueError(
'''No images found in save path, aborting (did you pass save_intermediate=True to the generate'''
''' function?)''' )
if len(UpperCAmelCase__ ) == 1:
print('''Only one image found in save path, (did you pass save_intermediate=True to the generate function?)''' )
snake_case : List[Any] = total_duration / len(UpperCAmelCase__ )
snake_case : Tuple = [frame_duration] * len(UpperCAmelCase__ )
if extend_frames:
snake_case : Optional[int] = 1.5
snake_case : int = 3
for file_name in paths:
if file_name.endswith('''.png''' ):
images.append(imageio.imread(UpperCAmelCase__ ) )
imageio.mimsave(UpperCAmelCase__ , UpperCAmelCase__ , duration=UpperCAmelCase__ )
print(F"gif saved to {output_path}" )
def lowerCAmelCase( self : str , UpperCAmelCase__ : List[str]=None , UpperCAmelCase__ : Optional[int]=None ):
"""simple docstring"""
if not (path or img):
raise ValueError('''Input either path or tensor''' )
if img is not None:
raise NotImplementedError
snake_case : int = preprocess(Image.open(UpperCAmelCase__ ) , target_image_size=256 ).to(self.device )
snake_case : Optional[Any] = preprocess_vqgan(UpperCAmelCase__ )
snake_case , *snake_case : Any = self.vqgan.encode(UpperCAmelCase__ )
return z
def lowerCAmelCase( self : Optional[int] , UpperCAmelCase__ : Tuple ):
"""simple docstring"""
snake_case : str = self.latent.detach().requires_grad_()
snake_case : int = base_latent + transform_vector
if self.quantize:
snake_case , *snake_case : str = self.vqgan.quantize(UpperCAmelCase__ )
else:
snake_case : str = trans_latent
return self.vqgan.decode(UpperCAmelCase__ )
def lowerCAmelCase( self : Optional[int] , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Any , UpperCAmelCase__ : List[str]=None ):
"""simple docstring"""
snake_case : List[Any] = self.clip_preprocessor(text=UpperCAmelCase__ , images=UpperCAmelCase__ , return_tensors='''pt''' , padding=UpperCAmelCase__ )
snake_case : str = self.clip(**UpperCAmelCase__ )
snake_case : Dict = clip_outputs.logits_per_image
if weights is not None:
snake_case : str = similarity_logits * weights
return similarity_logits.sum()
def lowerCAmelCase( self : Any , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Any , UpperCAmelCase__ : Union[str, Any] ):
"""simple docstring"""
snake_case : Optional[int] = self._get_clip_similarity(pos_prompts['''prompts'''] , UpperCAmelCase__ , weights=(1 / pos_prompts['''weights''']) )
if neg_prompts:
snake_case : Optional[int] = self._get_clip_similarity(neg_prompts['''prompts'''] , UpperCAmelCase__ , weights=neg_prompts['''weights'''] )
else:
snake_case : Union[str, Any] = torch.tensor([1] , device=self.device )
snake_case : Any = -torch.log(UpperCAmelCase__ ) + torch.log(UpperCAmelCase__ )
return loss
def lowerCAmelCase( self : str , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : List[Any] ):
"""simple docstring"""
snake_case : str = torch.randn_like(self.latent , requires_grad=UpperCAmelCase__ , device=self.device )
snake_case : Tuple = torch.optim.Adam([vector] , lr=self.lr )
for i in range(self.iterations ):
optim.zero_grad()
snake_case : int = self._add_vector(UpperCAmelCase__ )
snake_case : Tuple = loop_post_process(UpperCAmelCase__ )
snake_case : Optional[Any] = self._get_CLIP_loss(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
print('''CLIP loss''' , UpperCAmelCase__ )
if self.log:
wandb.log({'''CLIP Loss''': clip_loss} )
clip_loss.backward(retain_graph=UpperCAmelCase__ )
optim.step()
if self.return_val == "image":
yield custom_to_pil(transformed_img[0] )
else:
yield vector
def lowerCAmelCase( self : List[str] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : Dict ):
"""simple docstring"""
wandb.init(reinit=UpperCAmelCase__ , project='''face-editor''' )
wandb.config.update({'''Positive Prompts''': positive_prompts} )
wandb.config.update({'''Negative Prompts''': negative_prompts} )
wandb.config.update({'''lr''': self.lr, '''iterations''': self.iterations} )
if image_path:
snake_case : int = Image.open(UpperCAmelCase__ )
snake_case : Dict = image.resize((256, 256) )
wandb.log('''Original Image''' , wandb.Image(UpperCAmelCase__ ) )
def lowerCAmelCase( self : List[Any] , UpperCAmelCase__ : Any ):
"""simple docstring"""
if not prompts:
return []
snake_case : Dict = []
snake_case : int = []
if isinstance(UpperCAmelCase__ , UpperCAmelCase__ ):
snake_case : List[Any] = [prompt.strip() for prompt in prompts.split('''|''' )]
for prompt in prompts:
if isinstance(UpperCAmelCase__ , (tuple, list) ):
snake_case : str = prompt[0]
snake_case : Dict = float(prompt[1] )
elif ":" in prompt:
snake_case , snake_case : List[Any] = prompt.split(''':''' )
snake_case : Union[str, Any] = float(UpperCAmelCase__ )
else:
snake_case : Any = prompt
snake_case : List[str] = 1.0
processed_prompts.append(UpperCAmelCase__ )
weights.append(UpperCAmelCase__ )
return {
"prompts": processed_prompts,
"weights": torch.tensor(UpperCAmelCase__ , device=self.device ),
}
def lowerCAmelCase( self : Tuple , UpperCAmelCase__ : Any , UpperCAmelCase__ : str=None , UpperCAmelCase__ : List[Any]=None , UpperCAmelCase__ : Union[str, Any]=True , UpperCAmelCase__ : str=False , UpperCAmelCase__ : Optional[int]=True , UpperCAmelCase__ : Optional[Any]=True , UpperCAmelCase__ : Tuple=None , ):
"""simple docstring"""
if image_path:
snake_case : List[str] = self._get_latent(UpperCAmelCase__ )
else:
snake_case : List[str] = torch.randn(self.latent_dim , device=self.device )
if self.log:
self._init_logging(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
assert pos_prompts, "You must provide at least one positive prompt."
snake_case : Dict = self.process_prompts(UpperCAmelCase__ )
snake_case : Any = self.process_prompts(UpperCAmelCase__ )
if save_final and save_path is None:
snake_case : Optional[int] = os.path.join('''./outputs/''' , '''_'''.join(pos_prompts['''prompts'''] ) )
if not os.path.exists(UpperCAmelCase__ ):
os.makedirs(UpperCAmelCase__ )
else:
snake_case : Optional[int] = save_path + '''_''' + get_timestamp()
os.makedirs(UpperCAmelCase__ )
snake_case : Any = save_path
snake_case : Any = self.vqgan.decode(self.latent )[0]
if show_intermediate:
print('''Original Image''' )
show_pil(custom_to_pil(UpperCAmelCase__ ) )
snake_case : Any = loop_post_process(UpperCAmelCase__ )
for iter, transformed_img in enumerate(self._optimize_CLIP(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ) ):
if show_intermediate:
show_pil(UpperCAmelCase__ )
if save_intermediate:
transformed_img.save(os.path.join(self.save_path , F"iter_{iter:03d}.png" ) )
if self.log:
wandb.log({'''Image''': wandb.Image(UpperCAmelCase__ )} )
if show_final:
show_pil(UpperCAmelCase__ )
if save_final:
transformed_img.save(os.path.join(self.save_path , F"iter_{iter:03d}_final.png" ) )
| 598
|
from math import log
from scipy.constants import Boltzmann, physical_constants
T = 300  # TEMPERATURE (unit = K)


def builtin_voltage(
    donor_conc: float,  # donor concentration
    acceptor_conc: float,  # acceptor concentration
    intrinsic_conc: float,  # intrinsic concentration
) -> float:
    if donor_conc <= 0:
        raise ValueError("Donor concentration should be positive")
    elif acceptor_conc <= 0:
        raise ValueError("Acceptor concentration should be positive")
    elif intrinsic_conc <= 0:
        raise ValueError("Intrinsic concentration should be positive")
    elif donor_conc <= intrinsic_conc:
        raise ValueError(
            "Donor concentration should be greater than intrinsic concentration"
        )
    elif acceptor_conc <= intrinsic_conc:
        raise ValueError(
            "Acceptor concentration should be greater than intrinsic concentration"
        )
    else:
        return (
            Boltzmann
            * T
            * log((donor_conc * acceptor_conc) / intrinsic_conc**2)
            / physical_constants["electron volt"][0]
        )
if __name__ == "__main__":
import doctest
doctest.testmod()
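# Worked example (illustrative, silicon-like numbers, all in cm^-3): with
# donor_conc = 1e17, acceptor_conc = 1e17 and intrinsic_conc = 1.5e10,
# V_bi = (kT/q) * ln(1e34 / 2.25e20) ~= 0.0259 * 31.4 ~= 0.81 V at T = 300 K.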
| 598
| 1
|
from dataclasses import dataclass
from typing import Tuple
import numpy as np
import torch
@dataclass
class DifferentiableProjectiveCamera:
    """
    Implements a batch, differentiable, standard pinhole camera
    """

    origin: torch.Tensor  # [batch_size x 3]
    x: torch.Tensor  # [batch_size x 3]
    y: torch.Tensor  # [batch_size x 3]
    z: torch.Tensor  # [batch_size x 3]
    width: int
    height: int
    x_fov: float
    y_fov: float
    shape: Tuple[int]

    def __post_init__(self):
        assert self.x.shape[0] == self.y.shape[0] == self.z.shape[0] == self.origin.shape[0]
        assert self.x.shape[1] == self.y.shape[1] == self.z.shape[1] == self.origin.shape[1] == 3
        assert len(self.x.shape) == len(self.y.shape) == len(self.z.shape) == len(self.origin.shape) == 2

    def resolution(self):
        return torch.from_numpy(np.array([self.width, self.height], dtype=np.float32))

    def fov(self):
        return torch.from_numpy(np.array([self.x_fov, self.y_fov], dtype=np.float32))

    def get_image_coords(self) -> torch.Tensor:
        pixel_indices = torch.arange(self.height * self.width)
        coords = torch.stack(
            [
                pixel_indices % self.width,
                torch.div(pixel_indices, self.width, rounding_mode="trunc"),
            ],
            axis=1,
        )
        return coords

    @property
    def camera_rays(self):
        batch_size, *inner_shape = self.shape
        inner_batch_size = int(np.prod(inner_shape))

        coords = self.get_image_coords()
        coords = torch.broadcast_to(coords.unsqueeze(0), [batch_size * inner_batch_size, *coords.shape])
        rays = self.get_camera_rays(coords)

        rays = rays.view(batch_size, inner_batch_size * self.height * self.width, 2, 3)

        return rays

    def get_camera_rays(self, coords: torch.Tensor) -> torch.Tensor:
        batch_size, *shape, n_coords = coords.shape
        assert n_coords == 2
        assert batch_size == self.origin.shape[0]

        flat = coords.view(batch_size, -1, 2)

        res = self.resolution()
        fov = self.fov()

        fracs = (flat.float() / (res - 1)) * 2 - 1
        fracs = fracs * torch.tan(fov / 2)

        fracs = fracs.view(batch_size, -1, 2)
        directions = (
            self.z.view(batch_size, 1, 3)
            + self.x.view(batch_size, 1, 3) * fracs[:, :, :1]
            + self.y.view(batch_size, 1, 3) * fracs[:, :, 1:]
        )
        directions = directions / directions.norm(dim=-1, keepdim=True)
        rays = torch.stack(
            [
                torch.broadcast_to(self.origin.view(batch_size, 1, 3), [batch_size, directions.shape[1], 3]),
                directions,
            ],
            dim=2,
        )
        return rays.view(batch_size, *shape, 2, 3)

    def resize_image(self, width: int, height: int) -> "DifferentiableProjectiveCamera":
        """
        Creates a new camera for the resized view, assuming the aspect ratio does not change.
        """
        assert width * self.height == height * self.width, "The aspect ratio should not change."
        return DifferentiableProjectiveCamera(
            origin=self.origin,
            x=self.x,
            y=self.y,
            z=self.z,
            width=width,
            height=height,
            x_fov=self.x_fov,
            y_fov=self.y_fov,
        )


def create_pan_cameras(size: int) -> DifferentiableProjectiveCamera:
    origins = []
    xs = []
    ys = []
    zs = []
    for theta in np.linspace(0, 2 * np.pi, num=20):
        z = np.array([np.sin(theta), np.cos(theta), -0.5])
        z /= np.sqrt(np.sum(z**2))
        origin = -z * 4
        x = np.array([np.cos(theta), -np.sin(theta), 0.0])
        y = np.cross(z, x)
        origins.append(origin)
        xs.append(x)
        ys.append(y)
        zs.append(z)
    return DifferentiableProjectiveCamera(
        origin=torch.from_numpy(np.stack(origins, axis=0)).float(),
        x=torch.from_numpy(np.stack(xs, axis=0)).float(),
        y=torch.from_numpy(np.stack(ys, axis=0)).float(),
        z=torch.from_numpy(np.stack(zs, axis=0)).float(),
        width=size,
        height=size,
        x_fov=0.7,
        y_fov=0.7,
        shape=(1, len(xs)),
    )
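# Illustrative usage: twenty 64x64 cameras panning around the origin.
#
#     cameras = create_pan_cameras(64)
#     rays = cameras.camera_rays  # shape [1, 20 * 64 * 64, 2, 3]: one (origin, direction) pair per pixel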
| 381
|
import copy
import os
from typing import TYPE_CHECKING, List, Union
if TYPE_CHECKING:
pass
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

ALIGN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "kakaobrain/align-base": "https://huggingface.co/kakaobrain/align-base/resolve/main/config.json",
}
class lowerCAmelCase__( __lowercase ):
'''simple docstring'''
__snake_case = 'align_text_model'
def __init__( self , __lowerCamelCase=3_0_5_2_2 , __lowerCamelCase=7_6_8 , __lowerCamelCase=1_2 , __lowerCamelCase=1_2 , __lowerCamelCase=3_0_7_2 , __lowerCamelCase="gelu" , __lowerCamelCase=0.1 , __lowerCamelCase=0.1 , __lowerCamelCase=5_1_2 , __lowerCamelCase=2 , __lowerCamelCase=0.02 , __lowerCamelCase=1E-12 , __lowerCamelCase=0 , __lowerCamelCase="absolute" , __lowerCamelCase=True , **__lowerCamelCase , ) -> List[Any]:
super().__init__(**__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Tuple = vocab_size
_SCREAMING_SNAKE_CASE : Optional[int] = hidden_size
_SCREAMING_SNAKE_CASE : Union[str, Any] = num_hidden_layers
_SCREAMING_SNAKE_CASE : Optional[int] = num_attention_heads
_SCREAMING_SNAKE_CASE : int = hidden_act
_SCREAMING_SNAKE_CASE : Any = intermediate_size
_SCREAMING_SNAKE_CASE : Union[str, Any] = hidden_dropout_prob
_SCREAMING_SNAKE_CASE : Union[str, Any] = attention_probs_dropout_prob
_SCREAMING_SNAKE_CASE : Dict = max_position_embeddings
_SCREAMING_SNAKE_CASE : Optional[Any] = type_vocab_size
_SCREAMING_SNAKE_CASE : Union[str, Any] = initializer_range
_SCREAMING_SNAKE_CASE : Dict = layer_norm_eps
_SCREAMING_SNAKE_CASE : str = position_embedding_type
_SCREAMING_SNAKE_CASE : Dict = use_cache
_SCREAMING_SNAKE_CASE : List[str] = pad_token_id
@classmethod
def UpperCamelCase_ ( cls , __lowerCamelCase , **__lowerCamelCase ) -> "PretrainedConfig":
cls._set_token_in_kwargs(__lowerCamelCase )
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : List[Any] = cls.get_config_dict(__lowerCamelCase , **__lowerCamelCase )
# get the text config dict if we are loading from AlignConfig
if config_dict.get("model_type" ) == "align":
_SCREAMING_SNAKE_CASE : Union[str, Any] = config_dict["text_config"]
if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F"""You are using a model of type {config_dict['model_type']} to instantiate a model of type """
F"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(__lowerCamelCase , **__lowerCamelCase )
class lowerCAmelCase__( __lowercase ):
'''simple docstring'''
__snake_case = 'align_vision_model'
def __init__( self , __lowerCamelCase = 3 , __lowerCamelCase = 6_0_0 , __lowerCamelCase = 2.0 , __lowerCamelCase = 3.1 , __lowerCamelCase = 8 , __lowerCamelCase = [3, 3, 5, 3, 5, 5, 3] , __lowerCamelCase = [3_2, 1_6, 2_4, 4_0, 8_0, 1_1_2, 1_9_2] , __lowerCamelCase = [1_6, 2_4, 4_0, 8_0, 1_1_2, 1_9_2, 3_2_0] , __lowerCamelCase = [] , __lowerCamelCase = [1, 2, 2, 2, 1, 2, 1] , __lowerCamelCase = [1, 2, 2, 3, 3, 4, 1] , __lowerCamelCase = [1, 6, 6, 6, 6, 6, 6] , __lowerCamelCase = 0.25 , __lowerCamelCase = "swish" , __lowerCamelCase = 2_5_6_0 , __lowerCamelCase = "mean" , __lowerCamelCase = 0.02 , __lowerCamelCase = 0.001 , __lowerCamelCase = 0.99 , __lowerCamelCase = 0.2 , **__lowerCamelCase , ) -> Dict:
super().__init__(**__lowerCamelCase )
_SCREAMING_SNAKE_CASE : int = num_channels
_SCREAMING_SNAKE_CASE : List[Any] = image_size
_SCREAMING_SNAKE_CASE : Dict = width_coefficient
_SCREAMING_SNAKE_CASE : str = depth_coefficient
_SCREAMING_SNAKE_CASE : Union[str, Any] = depth_divisor
_SCREAMING_SNAKE_CASE : List[Any] = kernel_sizes
_SCREAMING_SNAKE_CASE : Tuple = in_channels
_SCREAMING_SNAKE_CASE : Optional[int] = out_channels
_SCREAMING_SNAKE_CASE : List[Any] = depthwise_padding
_SCREAMING_SNAKE_CASE : str = strides
_SCREAMING_SNAKE_CASE : List[str] = num_block_repeats
_SCREAMING_SNAKE_CASE : Tuple = expand_ratios
_SCREAMING_SNAKE_CASE : int = squeeze_expansion_ratio
_SCREAMING_SNAKE_CASE : List[Any] = hidden_act
_SCREAMING_SNAKE_CASE : Optional[int] = hidden_dim
_SCREAMING_SNAKE_CASE : Dict = pooling_type
_SCREAMING_SNAKE_CASE : List[Any] = initializer_range
_SCREAMING_SNAKE_CASE : List[Any] = batch_norm_eps
_SCREAMING_SNAKE_CASE : Union[str, Any] = batch_norm_momentum
_SCREAMING_SNAKE_CASE : int = drop_connect_rate
_SCREAMING_SNAKE_CASE : Tuple = sum(__lowerCamelCase ) * 4
@classmethod
def UpperCamelCase_ ( cls , __lowerCamelCase , **__lowerCamelCase ) -> "PretrainedConfig":
cls._set_token_in_kwargs(__lowerCamelCase )
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : int = cls.get_config_dict(__lowerCamelCase , **__lowerCamelCase )
# get the vision config dict if we are loading from AlignConfig
if config_dict.get("model_type" ) == "align":
_SCREAMING_SNAKE_CASE : int = config_dict["vision_config"]
if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F"""You are using a model of type {config_dict['model_type']} to instantiate a model of type """
F"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(__lowerCamelCase , **__lowerCamelCase )
class lowerCAmelCase__( __lowercase ):
'''simple docstring'''
__snake_case = 'align'
__snake_case = True
def __init__( self , __lowerCamelCase=None , __lowerCamelCase=None , __lowerCamelCase=6_4_0 , __lowerCamelCase=1.0 , __lowerCamelCase=0.02 , **__lowerCamelCase , ) -> List[Any]:
super().__init__(**__lowerCamelCase )
if text_config is None:
_SCREAMING_SNAKE_CASE : List[Any] = {}
logger.info("text_config is None. Initializing the AlignTextConfig with default values." )
if vision_config is None:
_SCREAMING_SNAKE_CASE : List[str] = {}
logger.info("vision_config is None. Initializing the AlignVisionConfig with default values." )
_SCREAMING_SNAKE_CASE : Dict = AlignTextConfig(**__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Any = AlignVisionConfig(**__lowerCamelCase )
_SCREAMING_SNAKE_CASE : int = projection_dim
_SCREAMING_SNAKE_CASE : List[str] = temperature_init_value
_SCREAMING_SNAKE_CASE : Any = initializer_range
@classmethod
def UpperCamelCase_ ( cls , __lowerCamelCase , __lowerCamelCase , **__lowerCamelCase ) -> List[str]:
return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **__lowerCamelCase )
def UpperCamelCase_ ( self ) -> Union[str, Any]:
_SCREAMING_SNAKE_CASE : Optional[int] = copy.deepcopy(self.__dict__ )
_SCREAMING_SNAKE_CASE : Any = self.text_config.to_dict()
_SCREAMING_SNAKE_CASE : Optional[int] = self.vision_config.to_dict()
_SCREAMING_SNAKE_CASE : Dict = self.__class__.model_type
return output
| 381
| 1
|
def knapsack(weights: list, values: list, number_of_items: int, max_weight: int, index: int) -> int:
    """0/1 knapsack: maximum value using items from `index` onwards within `max_weight`."""
    if index == number_of_items:
        return 0
    ans1 = 0
    ans2 = 0
    ans1 = knapsack(weights, values, number_of_items, max_weight, index + 1)
    if weights[index] <= max_weight:
        ans2 = values[index] + knapsack(
            weights, values, number_of_items, max_weight - weights[index], index + 1
        )
    return max(ans1, ans2)
if __name__ == "__main__":
import doctest
doctest.testmod()
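# Illustrative call: weights [1, 3, 4], values [15, 20, 30], capacity 4.
# The best choice is items 0 and 1 (total weight 4, total value 35):
#
#     knapsack([1, 3, 4], [15, 20, 30], 3, 4, 0)  # -> 35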
| 429
|
from maths.is_square_free import is_square_free
from maths.prime_factors import prime_factors
def mobius(number: int) -> int:
    """Möbius function: 1, -1 or 0 depending on the prime factorisation of `number`."""
    factors = prime_factors(number)
    if is_square_free(factors):
        return -1 if len(factors) % 2 else 1
    return 0
if __name__ == "__main__":
import doctest
doctest.testmod()
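# Illustrative values: mobius(6) == 1 (6 = 2 * 3 is square-free with an even
# number of prime factors) and mobius(4) == 0 (4 = 2**2 is not square-free).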
| 429
| 1
|
import os
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from huggingface_hub.file_download import http_get
from requests.exceptions import HTTPError
from transformers import (
AlbertTokenizer,
AutoTokenizer,
BertTokenizer,
BertTokenizerFast,
    GPT2TokenizerFast,
is_tokenizers_available,
)
from transformers.testing_utils import TOKEN, USER, is_staging_test, require_tokenizers
from transformers.tokenization_utils import Trie
sys.path.append(str(Path(__file__).parent.parent / 'utils'))
from test_module.custom_tokenization import CustomTokenizer # noqa E402
if is_tokenizers_available():
from test_module.custom_tokenization_fast import CustomTokenizerFast
class TokenizerUtilTester(unittest.TestCase):
    def test_cached_files_are_used_when_internet_is_down(self):
        # A mock response for an HTTP head request to emulate server down.
        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}

        # Download this model to make sure it's in the cache.
        tokenizer = BertTokenizer.from_pretrained("hf-internal-testing/tiny-random-bert")

        # Under the mock environment we get a 500 error when trying to reach the tokenizer.
        with mock.patch("requests.Session.request", return_value=response_mock) as mock_head:
            tokenizer = BertTokenizer.from_pretrained("hf-internal-testing/tiny-random-bert")
            # This check we did call the fake head request
            mock_head.assert_called()

    @require_tokenizers
    def test_cached_files_are_used_when_internet_is_down_missing_files(self):
        # A mock response for an HTTP head request to emulate server down.
        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}

        # Download this model to make sure it's in the cache.
        tokenizer = GPT2TokenizerFast.from_pretrained("gpt2")

        # Under the mock environment we get a 500 error when trying to reach the tokenizer.
        with mock.patch("requests.Session.request", return_value=response_mock) as mock_head:
            tokenizer = GPT2TokenizerFast.from_pretrained("gpt2")
            # This check we did call the fake head request
            mock_head.assert_called()

    def test_legacy_load_from_one_file(self):
        # This test is for deprecated behavior and can be removed in v5.
        try:
            tmp_file = tempfile.mktemp()
            with open(tmp_file, "wb") as f:
                http_get("https://huggingface.co/albert-base-v1/resolve/main/spiece.model", f)

            tokenizer = AlbertTokenizer.from_pretrained(tmp_file)
        finally:
            os.remove(tmp_file)

        # Supporting this legacy load introduced a weird bug where the tokenizer would load local files if they are in
        # the current folder and have the right name.
        if os.path.isfile("tokenizer.json"):
            # We skip the test if the user has a `tokenizer.json` in this folder to avoid deleting it.
            return
        try:
            with open("tokenizer.json", "wb") as f:
                http_get("https://huggingface.co/hf-internal-testing/tiny-random-bert/blob/main/tokenizer.json", f)
            tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
            # The tiny random BERT has a vocab size of 1024, tiny gpt2 as a vocab size of 1000
            self.assertEqual(tokenizer.vocab_size, 1000)
            # Tokenizer should depend on the remote checkpoint, not the local tokenizer.json file.
        finally:
            os.remove("tokenizer.json")

    def test_legacy_load_from_url(self):
        # This test is for deprecated behavior and can be removed in v5.
        _ = AlbertTokenizer.from_pretrained("https://huggingface.co/albert-base-v1/resolve/main/spiece.model")
@is_staging_test
class TokenizerPushToHubTester(unittest.TestCase):
    vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "bla", "blou"]

    @classmethod
    def setUpClass(cls):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)

    @classmethod
    def tearDownClass(cls):
        try:
            delete_repo(token=cls._token, repo_id="test-tokenizer")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="valid_org/test-tokenizer-org")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="test-dynamic-tokenizer")
        except HTTPError:
            pass

    def test_push_to_hub(self):
        with tempfile.TemporaryDirectory() as tmp_dir:
            vocab_file = os.path.join(tmp_dir, "vocab.txt")
            with open(vocab_file, "w", encoding="utf-8") as vocab_writer:
                vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens]))
            tokenizer = BertTokenizer(vocab_file)

        tokenizer.push_to_hub("test-tokenizer", use_auth_token=self._token)
        new_tokenizer = BertTokenizer.from_pretrained(f"{USER}/test-tokenizer")
        self.assertDictEqual(new_tokenizer.vocab, tokenizer.vocab)

        # Reset repo
        delete_repo(token=self._token, repo_id="test-tokenizer")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            tokenizer.save_pretrained(tmp_dir, repo_id="test-tokenizer", push_to_hub=True, use_auth_token=self._token)

        new_tokenizer = BertTokenizer.from_pretrained(f"{USER}/test-tokenizer")
        self.assertDictEqual(new_tokenizer.vocab, tokenizer.vocab)

    def test_push_to_hub_in_organization(self):
        with tempfile.TemporaryDirectory() as tmp_dir:
            vocab_file = os.path.join(tmp_dir, "vocab.txt")
            with open(vocab_file, "w", encoding="utf-8") as vocab_writer:
                vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens]))
            tokenizer = BertTokenizer(vocab_file)

        tokenizer.push_to_hub("valid_org/test-tokenizer-org", use_auth_token=self._token)
        new_tokenizer = BertTokenizer.from_pretrained("valid_org/test-tokenizer-org")
        self.assertDictEqual(new_tokenizer.vocab, tokenizer.vocab)

        # Reset repo
        delete_repo(token=self._token, repo_id="valid_org/test-tokenizer-org")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            tokenizer.save_pretrained(
                tmp_dir, repo_id="valid_org/test-tokenizer-org", push_to_hub=True, use_auth_token=self._token
            )

        new_tokenizer = BertTokenizer.from_pretrained("valid_org/test-tokenizer-org")
        self.assertDictEqual(new_tokenizer.vocab, tokenizer.vocab)

    @require_tokenizers
    def test_push_to_hub_dynamic_tokenizer(self):
        CustomTokenizer.register_for_auto_class()
        with tempfile.TemporaryDirectory() as tmp_dir:
            vocab_file = os.path.join(tmp_dir, "vocab.txt")
            with open(vocab_file, "w", encoding="utf-8") as vocab_writer:
                vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens]))
            tokenizer = CustomTokenizer(vocab_file)

        # No fast custom tokenizer
        tokenizer.push_to_hub("test-dynamic-tokenizer", use_auth_token=self._token)
        tokenizer = AutoTokenizer.from_pretrained(f"{USER}/test-dynamic-tokenizer", trust_remote_code=True)
        # Can't make an isinstance check because the new_model.config is from the CustomTokenizer class of a dynamic module
        self.assertEqual(tokenizer.__class__.__name__, "CustomTokenizer")

        # Fast and slow custom tokenizer
        CustomTokenizerFast.register_for_auto_class()
        with tempfile.TemporaryDirectory() as tmp_dir:
            vocab_file = os.path.join(tmp_dir, "vocab.txt")
            with open(vocab_file, "w", encoding="utf-8") as vocab_writer:
                vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens]))

            bert_tokenizer = BertTokenizerFast.from_pretrained(tmp_dir)
            bert_tokenizer.save_pretrained(tmp_dir)
            tokenizer = CustomTokenizerFast.from_pretrained(tmp_dir)

        tokenizer.push_to_hub("test-dynamic-tokenizer", use_auth_token=self._token)
        tokenizer = AutoTokenizer.from_pretrained(f"{USER}/test-dynamic-tokenizer", trust_remote_code=True)
        # Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module
        self.assertEqual(tokenizer.__class__.__name__, "CustomTokenizerFast")
        tokenizer = AutoTokenizer.from_pretrained(
            f"{USER}/test-dynamic-tokenizer", use_fast=False, trust_remote_code=True
        )
        # Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module
        self.assertEqual(tokenizer.__class__.__name__, "CustomTokenizer")
class TrieTest(unittest.TestCase):
    def test_trie(self):
        trie = Trie()
        trie.add("Hello 友達")
        self.assertEqual(trie.data, {"H": {"e": {"l": {"l": {"o": {" ": {"友": {"達": {"": 1}}}}}}}}})
        trie.add("Hello")
        self.assertEqual(trie.data, {"H": {"e": {"l": {"l": {"o": {"": 1, " ": {"友": {"達": {"": 1}}}}}}}}})

    def test_trie_split(self):
        trie = Trie()
        self.assertEqual(trie.split("[CLS] This is a extra_id_100"), ["[CLS] This is a extra_id_100"])
        trie.add("[CLS]")
        trie.add("extra_id_1")
        trie.add("extra_id_100")
        self.assertEqual(trie.split("[CLS] This is a extra_id_100"), ["[CLS]", " This is a ", "extra_id_100"])

    def test_trie_single(self):
        trie = Trie()
        trie.add("A")
        self.assertEqual(trie.split("ABC"), ["A", "BC"])
        self.assertEqual(trie.split("BCA"), ["BC", "A"])

    def test_trie_final(self):
        trie = Trie()
        trie.add("TOKEN]")
        trie.add("[SPECIAL_TOKEN]")
        self.assertEqual(trie.split("This is something [SPECIAL_TOKEN]"), ["This is something ", "[SPECIAL_TOKEN]"])

    def test_trie_subtokens(self):
        trie = Trie()
        trie.add("A")
        trie.add("P")
        trie.add("[SPECIAL_TOKEN]")
        self.assertEqual(trie.split("This is something [SPECIAL_TOKEN]"), ["This is something ", "[SPECIAL_TOKEN]"])

    def test_trie_suffix_tokens(self):
        trie = Trie()
        trie.add("AB")
        trie.add("B")
        trie.add("C")
        self.assertEqual(trie.split("ABC"), ["AB", "C"])

    def test_trie_skip(self):
        trie = Trie()
        trie.add("ABC")
        trie.add("B")
        trie.add("CD")
        self.assertEqual(trie.split("ABCD"), ["ABC", "D"])

    def test_cut_text_hardening(self):
        # Even if the offsets are wrong, we necessarily output correct string parts.
        trie = Trie()
        parts = trie.cut_text("ABC", [0, 0, 2, 1, 2, 3])
        self.assertEqual(parts, ["AB", "C"])
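# --- usage sketch (added for illustration; not part of the original test file) ---
# Outside unittest, the Trie that slow tokenizers use splits text on added tokens:
#
#     trie = Trie()
#     trie.add("[CLS]")
#     trie.add("extra_id_100")
#     print(trie.split("[CLS] This is a extra_id_100"))
#     # -> ['[CLS]', ' This is a ', 'extra_id_100']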
| 240
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {"configuration_wavlm": ["WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "WavLMConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_wavlm"] = [
        "WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "WavLMForAudioFrameClassification",
        "WavLMForCTC",
        "WavLMForSequenceClassification",
        "WavLMForXVector",
        "WavLMModel",
        "WavLMPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_wavlm import WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP, WavLMConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_wavlm import (
            WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST,
            WavLMForAudioFrameClassification,
            WavLMForCTC,
            WavLMForSequenceClassification,
            WavLMForXVector,
            WavLMModel,
            WavLMPreTrainedModel,
        )
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
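# Note (added for illustration; not part of the original file): with the _LazyModule
# indirection above, `from transformers.models.wavlm import WavLMConfig` resolves
# immediately, while `WavLMModel` (and its torch dependency) is only imported the
# first time the attribute is actually accessed.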
| 240
| 1
|
class Graph:  # Public class to implement a graph
    def __init__(self, row: int, col: int, graph: list[list[bool]]):
        self.ROW = row
        self.COL = col
        self.graph = graph

    def is_safe(self, i: int, j: int, visited: list[list[bool]]) -> bool:
        """A cell may be visited if it lies inside the grid, is unvisited, and is set."""
        return (
            0 <= i < self.ROW
            and 0 <= j < self.COL
            and not visited[i][j]
            and self.graph[i][j]
        )

    def diffs(self, i: int, j: int, visited: list[list[bool]]) -> None:
        """Depth-first search over the 8 neighbours of cell (i, j)."""
        row_nbr = [-1, -1, -1, 0, 0, 1, 1, 1]  # Coordinate order
        col_nbr = [-1, 0, 1, -1, 1, -1, 0, 1]
        visited[i][j] = True  # Make those cells visited
        for k in range(8):
            if self.is_safe(i + row_nbr[k], j + col_nbr[k], visited):
                self.diffs(i + row_nbr[k], j + col_nbr[k], visited)

    def count_islands(self) -> int:  # And finally, count all islands.
        visited = [[False for j in range(self.COL)] for i in range(self.ROW)]
        count = 0
        for i in range(self.ROW):
            for j in range(self.COL):
                if not visited[i][j] and self.graph[i][j] == 1:
                    self.diffs(i, j, visited)
                    count += 1
        return count
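# --- usage sketch (added for illustration; not part of the original file) ---
if __name__ == "__main__":
    grid = [
        [1, 1, 0, 0],
        [0, 1, 0, 0],
        [0, 0, 0, 1],
    ]
    # Two 8-connected islands: the top-left blob and the lone bottom-right cell.
    print(Graph(3, 4, grid).count_islands())  # -> 2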
| 402
|
from math import sqrt
def is_prime(number: int) -> bool:
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes number are in format of 6k +/- 1
    for i in range(5, int(sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def solution(nth: int = 10001) -> int:
    """Return the nth prime number (Project Euler problem 7)."""
    count = 0
    number = 1
    # Handle the primes 2 and 3 first, then only test odd candidates.
    while count != nth and number < 3:
        number += 1
        if is_prime(number):
            count += 1
    while count != nth:
        number += 2
        if is_prime(number):
            count += 1
    return number
if __name__ == "__main__":
print(F'''{solution() = }''')
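# --- sanity checks (added for illustration; not part of the original file) ---
if __name__ == "__main__":
    assert solution(1) == 2 and solution(6) == 13  # first prime is 2, sixth is 13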
| 402
| 1
|
import argparse
import json
import os
import fairseq
import torch
from torch import nn
from transformers import (
    Speech2Text2Config,
    Speech2Text2ForCausalLM,
    Speech2Text2Tokenizer,
    SpeechEncoderDecoderConfig,
    SpeechEncoderDecoderModel,
    Wav2Vec2Config,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2Model,
    logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
MAPPING = {
'post_extract_proj': 'feature_projection.projection',
'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
'self_attn.k_proj': 'encoder.layers.*.attention.k_proj',
'self_attn.v_proj': 'encoder.layers.*.attention.v_proj',
'self_attn.q_proj': 'encoder.layers.*.attention.q_proj',
'self_attn.out_proj': 'encoder.layers.*.attention.out_proj',
'self_attn_layer_norm': 'encoder.layers.*.layer_norm',
'fc1': 'encoder.layers.*.feed_forward.intermediate_dense',
'fc2': 'encoder.layers.*.feed_forward.output_dense',
'final_layer_norm': 'encoder.layers.*.final_layer_norm',
'encoder.layer_norm': 'encoder.layer_norm',
'w2v_model.layer_norm': 'feature_projection.layer_norm',
'quantizer.weight_proj': 'quantizer.weight_proj',
'quantizer.vars': 'quantizer.codevectors',
'project_q': 'project_q',
'final_proj': 'project_hid',
'w2v_encoder.proj': 'lm_head',
'mask_emb': 'masked_spec_embed',
}
TOP_LEVEL_KEYS = [
'lm_head',
'quantizer.weight_proj',
'quantizer.codevectors',
'project_q',
'project_hid',
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    assert hf_shape == value.shape, (
        f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
        f" {value.shape} for {full_name}"
    )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")
def recursively_load_weights_wav2vec2(fairseq_model, hf_model):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.feature_extractor

    # if encoder has different dim to decoder -> use proj_weight
    proj_weight = None

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        elif name.split(".")[0] == "proj":
            proj_weight = fairseq_model.proj
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        weight_type = "weight"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")

    return proj_weight
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer


def create_vocab_dict(dict_path):
    with open(dict_path, "r", encoding="utf-8") as f:
        lines = f.readlines()
        words = [line.split(" ")[0] for line in lines]

    num_words = len(words)

    vocab_dict = {
        "<s>": 0,
        "<pad>": 1,
        "</s>": 2,
        "<unk>": 3,
    }

    vocab_dict.update(dict(zip(words, range(4, num_words + 4))))
    return vocab_dict
@torch.no_grad()
def convert_wav2vec2_checkpoint(
    checkpoint_path,
    pytorch_dump_folder_path,
    dict_path,
    encoder_config_path,
    decoder_config_path,
    vocab_size,
    num_decoder_layers,
):
    """
    Copy/paste/tweak the fairseq model's weights into the transformers design.
    """
    encoder_config = Wav2Vec2Config.from_pretrained(encoder_config_path)
    decoder_config = Speech2Text2Config.from_pretrained(
        decoder_config_path, vocab_size=vocab_size, decoder_layers=num_decoder_layers, do_stable_layer_norm=True
    )

    feature_extractor = Wav2Vec2FeatureExtractor(
        feature_size=1,
        sampling_rate=16000,
        padding_value=0,
        do_normalize=True,
        return_attention_mask=True,
    )

    model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
        [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])}
    )
    model = model[0].eval()

    # set weights for wav2vec2 encoder
    hf_encoder = Wav2Vec2Model(encoder_config)
    projection_layer = recursively_load_weights_wav2vec2(model.encoder, hf_encoder)

    hf_decoder = Speech2Text2ForCausalLM(decoder_config)
    missing_keys, unexpected_keys = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict(), strict=False)

    # set output linear layer
    unexpected_keys.remove("embed_out")
    hf_decoder.lm_head.weight = nn.Parameter(model.decoder.embed_out.detach())

    # layer norm is init to identity matrix so leaving it is fine
    logger.warning(f"The following keys are missing when loading the decoder weights: {missing_keys}")
    logger.warning(f"The following keys are unexpected when loading the decoder weights: {unexpected_keys}")

    hf_wav2vec = SpeechEncoderDecoderModel(encoder=hf_encoder, decoder=hf_decoder)
    hf_wav2vec.config.tie_word_embeddings = False

    # add projection layer
    hf_wav2vec.enc_to_dec_proj.weight = nn.Parameter(projection_layer.weight)
    hf_wav2vec.enc_to_dec_proj.bias = nn.Parameter(projection_layer.bias)

    vocab_dict = create_vocab_dict(dict_path)

    with open(os.path.join(pytorch_dump_folder_path, "vocab.json"), "w") as fp:
        json.dump(vocab_dict, fp)

    tokenizer = Speech2Text2Tokenizer(os.path.join(pytorch_dump_folder_path, "vocab.json"))
    tokenizer.save_pretrained(pytorch_dump_folder_path)

    config = hf_wav2vec.config.to_dict()
    config["pad_token_id"] = tokenizer.pad_token_id
    config["bos_token_id"] = tokenizer.bos_token_id
    config["eos_token_id"] = tokenizer.eos_token_id
    config["tokenizer_class"] = "speech_to_text_2"
    config["feature_extractor_type"] = "wav2vec2"

    hf_wav2vec.config = SpeechEncoderDecoderConfig.from_dict(config)

    hf_wav2vec.save_pretrained(pytorch_dump_folder_path)
    feature_extractor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
a_ = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model')
parser.add_argument(
'--encoder_config_path',
default='facebook/wav2vec2-large-lv60',
type=str,
help='Path to hf encoder wav2vec2 checkpoint config',
)
parser.add_argument(
'--decoder_config_path',
default='facebook/s2t-small-mustc-en-fr-st',
type=str,
help='Path to hf decoder s2t checkpoint config',
)
parser.add_argument('--vocab_size', default=1_0224, type=int, help='Vocab size of decoder')
parser.add_argument('--num_decoder_layers', default=7, type=int, help='Number of decoder layers')
a_ = parser.parse_args()
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.dict_path,
encoder_config_path=args.encoder_config_path,
decoder_config_path=args.decoder_config_path,
vocab_size=args.vocab_size,
num_decoder_layers=args.num_decoder_layers,
)
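# Example invocation (added for illustration; not part of the original script; the
# script name and all paths below are placeholders):
#
#     python convert_script.py \
#         --checkpoint_path /path/to/fairseq/checkpoint.pt \
#         --dict_path /path/to/dict.ltr.txt \
#         --pytorch_dump_folder_path ./converted-wav2vec2-s2t2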
| 715
|
import numpy as np
class Cell:
    """A cell in the world: position, parent link and A* bookkeeping costs."""

    def __init__(self):
        self.position = (0, 0)
        self.parent = None
        self.g = 0  # cost from start
        self.h = 0  # heuristic cost to goal
        self.f = 0  # g + h

    def __eq__(self, cell):
        return self.position == cell.position

    def showcell(self):
        print(self.position)


class Gridworld:
    """The external world: a world_size[0] x world_size[1] grid."""

    def __init__(self, world_size=(5, 5)):
        self.w = np.zeros(world_size)
        self.world_x_limit = world_size[0]
        self.world_y_limit = world_size[1]

    def show(self):
        print(self.w)

    def get_neighbours(self, cell):
        """Return the 8 neighbours of `cell` that lie inside the grid, parented to `cell`."""
        neighbour_cords = [
            (-1, -1),
            (-1, 0),
            (-1, 1),
            (0, -1),
            (0, 1),
            (1, -1),
            (1, 0),
            (1, 1),
        ]
        current_x = cell.position[0]
        current_y = cell.position[1]
        neighbours = []
        for n in neighbour_cords:
            x = current_x + n[0]
            y = current_y + n[1]
            if 0 <= x < self.world_x_limit and 0 <= y < self.world_y_limit:
                c = Cell()
                c.position = (x, y)
                c.parent = cell
                neighbours.append(c)
        return neighbours


def astar(world, start, goal):
    """
    A* search from `start` to `goal` on `world`; returns the path as a list of positions.
    """
    _open = []
    _closed = []
    _open.append(start)

    while _open:
        min_f = np.argmin([n.f for n in _open])
        current = _open[min_f]
        _closed.append(_open.pop(min_f))
        if current == goal:
            break
        for n in world.get_neighbours(current):
            # Skip cells that have already been expanded.
            if any(c == n for c in _closed):
                continue
            n.g = current.g + 1
            x1, y1 = n.position
            x2, y2 = goal.position
            n.h = (y2 - y1) ** 2 + (x2 - x1) ** 2
            n.f = n.h + n.g
            # Skip if a cheaper copy of this cell is already queued.
            if any(c == n and c.f < n.f for c in _open):
                continue
            _open.append(n)
    path = []
    while current.parent is not None:
        path.append(current.position)
        current = current.parent
    path.append(current.position)
    return path[::-1]


if __name__ == "__main__":
    world = Gridworld()
    # Start position and goal
    start = Cell()
    start.position = (0, 0)
    goal = Cell()
    goal.position = (4, 4)
    print(f"path from {start.position} to {goal.position}")
    s = astar(world, start, goal)
    # Just for visual reasons.
    for i in s:
        world.w[i] = 1
    print(world.w)
| 193
| 0
|
'''simple docstring'''
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler, UNet2DConditionModel
from diffusers.pipelines.stable_diffusion_safe import StableDiffusionPipelineSafe as StableDiffusionPipeline
from diffusers.utils import floats_tensor, nightly, torch_device
from diffusers.utils.testing_utils import require_torch_gpu
class SafeDiffusionPipelineFastTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    @property
    def dummy_image(self):
        batch_size = 1
        num_channels = 3
        sizes = (32, 32)

        image = floats_tensor((batch_size, num_channels) + sizes, rng=random.Random(0)).to(torch_device)
        return image

    @property
    def dummy_cond_unet(self):
        torch.manual_seed(0)
        model = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
        )
        return model

    @property
    def dummy_vae(self):
        torch.manual_seed(0)
        model = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        return model

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        return CLIPTextModel(config)

    @property
    def dummy_extractor(self):
        def extract(*args, **kwargs):
            class Out:
                def __init__(self):
                    self.pixel_values = torch.ones([0])

                def to(self, device):
                    self.pixel_values.to(device)
                    return self

            return Out()

        return extract

    def test_safe_diffusion_ddim(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        unet = self.dummy_cond_unet
        scheduler = DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            clip_sample=False,
            set_alpha_to_one=False,
        )

        vae = self.dummy_vae
        bert = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        # make sure here that pndm scheduler skips prk
        sd_pipe = StableDiffusionPipeline(
            unet=unet,
            scheduler=scheduler,
            vae=vae,
            text_encoder=bert,
            tokenizer=tokenizer,
            safety_checker=None,
            feature_extractor=self.dummy_extractor,
        )
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"

        generator = torch.Generator(device=device).manual_seed(0)
        output = sd_pipe([prompt], generator=generator, guidance_scale=6.0, num_inference_steps=2, output_type="np")
        image = output.images

        generator = torch.Generator(device=device).manual_seed(0)
        image_from_tuple = sd_pipe(
            [prompt],
            generator=generator,
            guidance_scale=6.0,
            num_inference_steps=2,
            output_type="np",
            return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.5756, 0.6118, 0.5005, 0.5041, 0.5471, 0.4726, 0.4976, 0.4865, 0.4864])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2

    def test_stable_diffusion_pndm(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        unet = self.dummy_cond_unet
        scheduler = PNDMScheduler(skip_prk_steps=True)
        vae = self.dummy_vae
        bert = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        # make sure here that pndm scheduler skips prk
        sd_pipe = StableDiffusionPipeline(
            unet=unet,
            scheduler=scheduler,
            vae=vae,
            text_encoder=bert,
            tokenizer=tokenizer,
            safety_checker=None,
            feature_extractor=self.dummy_extractor,
        )
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.Generator(device=device).manual_seed(0)
        output = sd_pipe([prompt], generator=generator, guidance_scale=6.0, num_inference_steps=2, output_type="np")

        image = output.images
        generator = torch.Generator(device=device).manual_seed(0)
        image_from_tuple = sd_pipe(
            [prompt],
            generator=generator,
            guidance_scale=6.0,
            num_inference_steps=2,
            output_type="np",
            return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.5125, 0.5716, 0.4828, 0.5060, 0.5650, 0.4768, 0.5185, 0.4895, 0.4993])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2

    def test_stable_diffusion_no_safety_checker(self):
        pipe = StableDiffusionPipeline.from_pretrained(
            "hf-internal-testing/tiny-stable-diffusion-lms-pipe", safety_checker=None
        )
        assert isinstance(pipe, StableDiffusionPipeline)
        assert isinstance(pipe.scheduler, LMSDiscreteScheduler)
        assert pipe.safety_checker is None

        image = pipe("example prompt", num_inference_steps=2).images[0]
        assert image is not None

        # check that there's no error when saving a pipeline with one of the models being None
        with tempfile.TemporaryDirectory() as tmpdirname:
            pipe.save_pretrained(tmpdirname)
            pipe = StableDiffusionPipeline.from_pretrained(tmpdirname)

        # sanity check that the pipeline still works
        assert pipe.safety_checker is None
        image = pipe("example prompt", num_inference_steps=2).images[0]
        assert image is not None

    @unittest.skipIf(torch_device != "cuda", "This test requires a GPU")
    def test_stable_diffusion_fp16(self):
        """Test that stable diffusion works with fp16."""
        unet = self.dummy_cond_unet
        scheduler = PNDMScheduler(skip_prk_steps=True)
        vae = self.dummy_vae
        bert = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        # put models in fp16
        unet = unet.half()
        vae = vae.half()
        bert = bert.half()

        # make sure here that pndm scheduler skips prk
        sd_pipe = StableDiffusionPipeline(
            unet=unet,
            scheduler=scheduler,
            vae=vae,
            text_encoder=bert,
            tokenizer=tokenizer,
            safety_checker=None,
            feature_extractor=self.dummy_extractor,
        )
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        image = sd_pipe([prompt], num_inference_steps=2, output_type="np").images

        assert image.shape == (1, 64, 64, 3)
@nightly
@require_torch_gpu
class SafeDiffusionPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_harm_safe_stable_diffusion(self):
        sd_pipe = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5", safety_checker=None)
        sd_pipe.scheduler = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config)
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = (
            "portrait of girl with smokey eyes makeup in abandoned hotel, grange clothes, redshift, wide high angle"
            " coloured polaroid photograph with flash, kodak film, hyper real, stunning moody cinematography, with"
            " anamorphic lenses, by maripol, fallen angels by wong kar - wai, style of suspiria and neon demon and"
            " children from bahnhof zoo, detailed "
        )
        seed = 4003660346
        guidance_scale = 7

        # without safety guidance (sld_guidance_scale = 0)
        generator = torch.manual_seed(seed)
        output = sd_pipe(
            [prompt], generator=generator, guidance_scale=guidance_scale, num_inference_steps=50,
            output_type="np", width=512, height=512, sld_guidance_scale=0,
        )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = [0.2278, 0.2231, 0.2249, 0.2333, 0.2303, 0.1885, 0.2273, 0.2144, 0.2176]

        assert image.shape == (1, 512, 512, 3)
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

        # with safety guidance (strong configuration)
        generator = torch.manual_seed(seed)
        output = sd_pipe(
            [prompt], generator=generator, guidance_scale=guidance_scale, num_inference_steps=50,
            output_type="np", width=512, height=512, sld_guidance_scale=2000, sld_warmup_steps=7,
            sld_threshold=0.025, sld_momentum_scale=0.5, sld_mom_beta=0.7,
        )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = [0.2383, 0.2276, 0.236, 0.2192, 0.2186, 0.2053, 0.1971, 0.1901, 0.1719]

        assert image.shape == (1, 512, 512, 3)
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_nudity_safe_stable_diffusion(self):
        sd_pipe = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5", safety_checker=None)
        sd_pipe.scheduler = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config)
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "padme amidala taking a bath artwork, safe for work, no nudity"
        seed = 2734971755
        guidance_scale = 7

        generator = torch.manual_seed(seed)
        output = sd_pipe(
            [prompt], generator=generator, guidance_scale=guidance_scale, num_inference_steps=50,
            output_type="np", width=512, height=512, sld_guidance_scale=0,
        )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = [0.3502, 0.3622, 0.3396, 0.3642, 0.3478, 0.3318, 0.35, 0.3348, 0.3297]

        assert image.shape == (1, 512, 512, 3)
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

        generator = torch.manual_seed(seed)
        output = sd_pipe(
            [prompt], generator=generator, guidance_scale=guidance_scale, num_inference_steps=50,
            output_type="np", width=512, height=512, sld_guidance_scale=2000, sld_warmup_steps=7,
            sld_threshold=0.025, sld_momentum_scale=0.5, sld_mom_beta=0.7,
        )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = [0.5531, 0.5206, 0.4895, 0.5156, 0.5182, 0.4751, 0.4802, 0.4803, 0.4443]

        assert image.shape == (1, 512, 512, 3)
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_nudity_safetychecker_safe_stable_diffusion(self):
        sd_pipe = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = (
            "the four horsewomen of the apocalypse, painting by tom of finland, gaston bussiere, craig mullins, j. c."
            " leyendecker"
        )
        seed = 1044355234
        guidance_scale = 12

        generator = torch.manual_seed(seed)
        output = sd_pipe(
            [prompt], generator=generator, guidance_scale=guidance_scale, num_inference_steps=50,
            output_type="np", width=512, height=512, sld_guidance_scale=0,
        )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0])

        assert image.shape == (1, 512, 512, 3)
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-7

        generator = torch.manual_seed(seed)
        output = sd_pipe(
            [prompt], generator=generator, guidance_scale=guidance_scale, num_inference_steps=50,
            output_type="np", width=512, height=512, sld_guidance_scale=2000, sld_warmup_steps=7,
            sld_threshold=0.025, sld_momentum_scale=0.5, sld_mom_beta=0.7,
        )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.5818, 0.6285, 0.6835, 0.6019, 0.625, 0.6754, 0.6096, 0.6334, 0.6561])

        assert image.shape == (1, 512, 512, 3)
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
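# --- usage sketch (added for illustration; not part of the original test file) ---
# Safe Latent Diffusion exposes its strength through the `sld_*` kwargs exercised
# above; `sld_guidance_scale=0` disables the safety guidance entirely:
#
#     pipe = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")
#     image = pipe("a prompt", sld_guidance_scale=2000, sld_warmup_steps=7).images[0]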
| 22
|
import warnings
from ...utils import logging
from .image_processing_clip import CLIPImageProcessor
lowerCAmelCase : Union[str, Any] = logging.get_logger(__name__)
class CLIPFeatureExtractor(CLIPImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class CLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use CLIPImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
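# --- usage sketch (added for illustration; not part of the original file) ---
# Instantiating the deprecated class still works but emits the FutureWarning above
# (assuming CLIPImageProcessor's default constructor):
#
#     extractor = CLIPFeatureExtractor()   # warns, behaves like CLIPImageProcessor
#     processor = CLIPImageProcessor()     # preferred replacement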
| 511
| 0
|
def factorial(num: int) -> int:
    """Return num! computed iteratively."""
    fact = 1
    for i in range(1, num + 1):
        fact *= i
    return fact


def split_and_add(number: int) -> int:
    """Split the digits of `number` and add them up."""
    sum_of_digits = 0
    while number > 0:
        last_digit = number % 10
        sum_of_digits += last_digit
        number = number // 10  # Removing the last_digit from the given number
    return sum_of_digits


def solution(num: int = 100) -> int:
    """Return the sum of the digits of num! (Project Euler problem 20)."""
    nfact = factorial(num)
    result = split_and_add(nfact)
    return result
if __name__ == "__main__":
print(solution(int(input('Enter the Number: ').strip())))
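# --- sanity check (added for illustration; not part of the original file) ---
if __name__ == "__main__":
    # 10! = 3628800 and 3 + 6 + 2 + 8 + 8 + 0 + 0 = 27.
    assert solution(10) == 27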
| 706
|
import uuid
from typing import Any, Dict, List, Optional, Union
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
logger = logging.get_logger(__name__)
class Conversation:
    """Utility class holding a conversation: its history and any unprocessed user input."""

    def __init__(
        self, text: str = None, conversation_id: uuid.UUID = None, past_user_inputs=None, generated_responses=None
    ):
        if not conversation_id:
            conversation_id = uuid.uuid4()
        if past_user_inputs is None:
            past_user_inputs = []
        if generated_responses is None:
            generated_responses = []

        self.uuid: uuid.UUID = conversation_id
        self.past_user_inputs: List[str] = past_user_inputs
        self.generated_responses: List[str] = generated_responses
        self.new_user_input: Optional[str] = text

    def __eq__(self, other):
        if not isinstance(other, Conversation):
            return False
        if self.uuid == other.uuid:
            return True
        return (
            self.new_user_input == other.new_user_input
            and self.past_user_inputs == other.past_user_inputs
            and self.generated_responses == other.generated_responses
        )

    def add_user_input(self, text: str, overwrite: bool = False):
        if self.new_user_input:
            if overwrite:
                logger.warning(
                    f'User input added while unprocessed input was existing: "{self.new_user_input}" was overwritten '
                    f'with: "{text}".'
                )
                self.new_user_input = text
            else:
                logger.warning(
                    f'User input added while unprocessed input was existing: "{self.new_user_input}" new input '
                    f'ignored: "{text}". Set `overwrite` to True to overwrite unprocessed user input'
                )
        else:
            self.new_user_input = text

    def mark_processed(self):
        if self.new_user_input:
            self.past_user_inputs.append(self.new_user_input)
        self.new_user_input = None

    def append_response(self, response: str):
        self.generated_responses.append(response)

    def iter_texts(self):
        for user_input, generated_response in zip(self.past_user_inputs, self.generated_responses):
            yield True, user_input
            yield False, generated_response
        if self.new_user_input:
            yield True, self.new_user_input

    def __repr__(self):
        output = f"Conversation id: {self.uuid} \n"
        for is_user, text in self.iter_texts():
            name = "user" if is_user else "bot"
            output += f"{name} >> {text} \n"
        return output
@add_end_docstrings(
    PIPELINE_INIT_ARGS,
    r"""
        min_length_for_response (`int`, *optional*, defaults to 32):
            The minimum length (in number of tokens) for a response.
        minimum_tokens (`int`, *optional*, defaults to 10):
            The minimum length of tokens to leave for a response.
    """,
)
class ConversationalPipeline(Pipeline):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        if self.tokenizer.pad_token_id is None:
            self.tokenizer.pad_token = self.tokenizer.eos_token

    def _sanitize_parameters(
        self, min_length_for_response=None, minimum_tokens=None, clean_up_tokenization_spaces=None, **generate_kwargs
    ):
        preprocess_params = {}
        forward_params = {}
        postprocess_params = {}

        if min_length_for_response is not None:
            preprocess_params["min_length_for_response"] = min_length_for_response
        if minimum_tokens is not None:
            forward_params["minimum_tokens"] = minimum_tokens

        if "max_length" in generate_kwargs:
            forward_params["max_length"] = generate_kwargs["max_length"]
            # self.max_length = generate_kwargs.get("max_length", self.model.config.max_length)
        if clean_up_tokenization_spaces is not None:
            postprocess_params["clean_up_tokenization_spaces"] = clean_up_tokenization_spaces

        if generate_kwargs:
            forward_params.update(generate_kwargs)
        return preprocess_params, forward_params, postprocess_params

    def __call__(self, conversations: Union[Conversation, List[Conversation]], num_workers=0, **kwargs):
        outputs = super().__call__(conversations, num_workers=num_workers, **kwargs)
        if isinstance(outputs, list) and len(outputs) == 1:
            return outputs[0]
        return outputs

    def preprocess(self, conversation: Conversation, min_length_for_response=32) -> Dict[str, Any]:
        if not isinstance(conversation, Conversation):
            raise ValueError("ConversationalPipeline, expects Conversation as inputs")
        if conversation.new_user_input is None:
            raise ValueError(
                f"Conversation with UUID {type(conversation.uuid)} does not contain new user input to process. "
                "Add user inputs with the conversation's `add_user_input` method"
            )
        if hasattr(self.tokenizer, "_build_conversation_input_ids"):
            input_ids = self.tokenizer._build_conversation_input_ids(conversation)
        else:
            # If the tokenizer cannot handle conversations, we default to only the old version
            input_ids = self._legacy_parse_and_tokenize(conversation)

        if self.framework == "pt":
            input_ids = torch.LongTensor([input_ids])
        elif self.framework == "tf":
            input_ids = tf.constant([input_ids])
        return {"input_ids": input_ids, "conversation": conversation}

    def _forward(self, model_inputs, minimum_tokens=10, **generate_kwargs):
        max_length = generate_kwargs.get("max_length", self.model.config.max_length)

        n = model_inputs["input_ids"].shape[1]
        if max_length - minimum_tokens < n:
            logger.warning(f"Conversation input is too long ({n}), trimming it to ({max_length} - {minimum_tokens})")
            trim = max_length - minimum_tokens
            model_inputs["input_ids"] = model_inputs["input_ids"][:, -trim:]
            if "attention_mask" in model_inputs:
                model_inputs["attention_mask"] = model_inputs["attention_mask"][:, -trim:]
        conversation = model_inputs.pop("conversation")
        generate_kwargs["max_length"] = max_length
        output_ids = self.model.generate(**model_inputs, **generate_kwargs)
        if self.model.config.is_encoder_decoder:
            start_position = 1
        else:
            start_position = n
        return {"output_ids": output_ids[:, start_position:], "conversation": conversation}

    def postprocess(self, model_outputs, clean_up_tokenization_spaces=True):
        output_ids = model_outputs["output_ids"]
        answer = self.tokenizer.decode(
            output_ids[0],
            skip_special_tokens=True,
            clean_up_tokenization_spaces=clean_up_tokenization_spaces,
        )
        conversation = model_outputs["conversation"]
        conversation.mark_processed()
        conversation.append_response(answer)
        return conversation

    def _legacy_parse_and_tokenize(self, conversation: Conversation) -> List[int]:
        eos_token_id = self.tokenizer.eos_token_id
        input_ids = []
        for is_user, text in conversation.iter_texts():
            if eos_token_id is not None:
                input_ids.extend(self.tokenizer.encode(text, add_special_tokens=False) + [eos_token_id])
            else:
                input_ids.extend(self.tokenizer.encode(text, add_special_tokens=False))

        if len(input_ids) > self.tokenizer.model_max_length:
            input_ids = input_ids[-self.tokenizer.model_max_length :]
        return input_ids
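# --- usage sketch (added for illustration; not part of the original file) ---
# Assuming a conversational checkpoint such as "microsoft/DialoGPT-small":
#
#     from transformers import pipeline, Conversation
#
#     chatbot = pipeline("conversational", model="microsoft/DialoGPT-small")
#     conversation = Conversation("What's the weather like today?")
#     conversation = chatbot(conversation)
#     print(conversation.generated_responses[-1])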
| 249
| 0
|
'''simple docstring'''
def combination_sum_iv(n: int, array: list[int], target: int) -> int:
    """Naive recursion: count ordered ways to pick elements of `array` (with repetition) summing to `target`."""

    def count_of_possible_combinations(target: int) -> int:
        if target < 0:
            return 0
        if target == 0:
            return 1
        return sum(count_of_possible_combinations(target - item) for item in array)

    return count_of_possible_combinations(target)


def combination_sum_iv_dp_array(n: int, array: list[int], target: int) -> int:
    """Top-down memoised variant of combination_sum_iv."""

    def count_of_possible_combinations_with_dp_array(target: int, dp_array: list[int]) -> int:
        if target < 0:
            return 0
        if target == 0:
            return 1
        if dp_array[target] != -1:
            return dp_array[target]
        answer = sum(
            count_of_possible_combinations_with_dp_array(target - item, dp_array)
            for item in array
        )
        dp_array[target] = answer
        return answer

    dp_array = [-1] * (target + 1)
    return count_of_possible_combinations_with_dp_array(target, dp_array)


def combination_sum_iv_bottom_up(n: int, array: list[int], target: int) -> int:
    """Bottom-up dynamic-programming variant of combination_sum_iv."""
    dp_array = [0] * (target + 1)
    dp_array[0] = 1

    for i in range(1, target + 1):
        for j in range(n):
            if i - array[j] >= 0:
                dp_array[i] += dp_array[i - array[j]]

    return dp_array[target]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    n = 3
    target = 5
    array = [1, 2, 5]
    print(combination_sum_iv(n, array, target))
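    # --- sanity check (added for illustration; not part of the original file) ---
    # All three implementations must agree: with [1, 2, 5] and target 5 there are
    # 9 ordered combinations (e.g. 1+1+1+1+1, 1+2+2, 2+2+1, 5, ...).
    assert (
        combination_sum_iv(n, array, target)
        == combination_sum_iv_dp_array(n, array, target)
        == combination_sum_iv_bottom_up(n, array, target)
        == 9
    )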
| 138
|
'''simple docstring'''
import unittest
from transformers import GPTSw3Tokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece_with_bytefallback.model")
@require_sentencepiece
@require_tokenizers
class GPTSw3TokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = GPTSw3Tokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True
    test_sentencepiece_ignore_case = False

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = GPTSw3Tokenizer(SAMPLE_VOCAB, eos_token="<unk>", bos_token="<unk>", pad_token="<unk>")
        tokenizer.save_pretrained(self.tmpdirname)

    def get_input_output_texts(self, tokenizer):
        input_text = "This is a test"
        output_text = "This is a test"
        return input_text, output_text

    def test_convert_token_and_id(self):
        """Test ``_convert_token_to_id`` and ``_convert_id_to_token``."""
        token = "<s>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<unk>")
        self.assertEqual(vocab_keys[1], "<s>")
        self.assertEqual(vocab_keys[-1], "j")
        self.assertEqual(len(vocab_keys), 2000)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 2000)

    def test_full_tokenizer(self):
        tokenizer = GPTSw3Tokenizer(SAMPLE_VOCAB)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [465, 287, 265, 631, 842])

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        # fmt: off
        self.assertListEqual(
            tokens,
            ["▁I", "▁was", "▁bor", "n", "▁in", "▁", "<0x39>", "2", "0", "0", "0", ",", "▁and", "▁this", "▁is", "▁f", "al", "s", "<0xC3>", "<0xA9>", "."],
        )
        # fmt: on

        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [262, 272, 1525, 286, 271, 268, 60, 916, 633, 633, 633, 259, 266, 301, 287, 384, 367, 263, 198, 172, 260],
        )

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        # fmt: off
        self.assertListEqual(
            back_tokens,
            ["▁I", "▁was", "▁bor", "n", "▁in", "▁", "<0x39>", "2", "0", "0", "0", ",", "▁and", "▁this", "▁is", "▁f", "al", "s", "<0xC3>", "<0xA9>", "."],
        )
        # fmt: on

    def test_fast_encode_decode(self):
        tokenizer = GPTSw3Tokenizer(SAMPLE_VOCAB)
        texts = ["This is a test", "I was born in 92000, and this is falsé."]
        expected_ids_list = [
            [465, 287, 265, 631, 842],
            [262, 272, 1525, 286, 271, 268, 60, 916, 633, 633, 633, 259, 266, 301, 287, 384, 367, 263, 198, 172, 260],
        ]

        # Test that encode_fast returns the same as tokenize + convert_tokens_to_ids
        for text, expected_ids in zip(texts, expected_ids_list):
            self.assertListEqual(tokenizer.encode_fast(text), expected_ids)

        # Test that decode_fast returns the input text
        for text, token_ids in zip(texts, expected_ids_list):
            self.assertEqual(tokenizer.decode_fast(token_ids), text)

    @slow
    def test_tokenizer_integration(self):
        sequences = [
            "<|python|>def fibonacci(n)\n if n < 0:\n print('Incorrect input')",
            "Hey there, how are you doing this fine day?",
            "This is a text with a trailing spaces followed by a dot .",
            "Häj sväjs lillebrör! =)",
            "Det är inget fel på Mr. Cool",
        ]

        # fmt: off
_UpperCamelCase = {"input_ids": [[6_34_23, 5, 68_11, 1_49_54, 2_82, 8_16, 38_21, 6_34_66, 6_34_25, 6_34_62, 18, 6_39_78, 6_78, 3_01, 13_20, 6_34_23, 6_34_55, 6_34_58, 18, 6_39_82, 42_46, 39_40, 19_01, 4_77_89, 55_47, 1_89_94], [1_96_30, 11_00, 6_34_46, 13_42, 6_33, 5_44, 44_88, 5_93, 51_02, 24_16, 6_34_95, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [16_52, 4_28, 2_68, 19_36, 5_15, 2_68, 5_85_93, 2_24_13, 91_06, 5_46, 2_68, 3_32_13, 6_39_79, 6_98, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [5_51_30, 6_34_50, 9_24, 6_34_49, 22_49, 40_62, 15_58, 3_18, 6_35_04, 2_14_98, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [5_09, 3_77, 28_27, 25_59, 3_32, 65_75, 6_34_43, 2_68_01, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "token_type_ids": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]}
# fmt: on
        expected_encoding = _UpperCamelCase  # giant literal above kept verbatim
        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding, model_name="AI-Sweden/gpt-sw3-126m", sequences=sequences
        )
| 138
| 1
|
from typing import Callable, List, Optional, Union
import PIL
import torch
from transformers import (
CLIPImageProcessor,
CLIPSegForImageSegmentation,
CLIPSegProcessor,
CLIPTextModel,
CLIPTokenizer,
)
from diffusers import DiffusionPipeline
from diffusers.configuration_utils import FrozenDict
from diffusers.models import AutoencoderKL, UNet2DConditionModel
from diffusers.pipelines.stable_diffusion import StableDiffusionInpaintPipeline
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from diffusers.utils import deprecate, is_accelerate_available, logging
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
class _UpperCamelCase ( lowerCAmelCase_ ):
def __init__( self: List[Any] , _SCREAMING_SNAKE_CASE: CLIPSegForImageSegmentation , _SCREAMING_SNAKE_CASE: CLIPSegProcessor , _SCREAMING_SNAKE_CASE: AutoencoderKL , _SCREAMING_SNAKE_CASE: CLIPTextModel , _SCREAMING_SNAKE_CASE: CLIPTokenizer , _SCREAMING_SNAKE_CASE: UNetaDConditionModel , _SCREAMING_SNAKE_CASE: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler] , _SCREAMING_SNAKE_CASE: StableDiffusionSafetyChecker , _SCREAMING_SNAKE_CASE: CLIPImageProcessor , ) -> Dict:
"""simple docstring"""
super().__init__()
if hasattr(scheduler.config , "steps_offset" ) and scheduler.config.steps_offset != 1:
UpperCamelCase_ = (
f'''The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`'''
f''' should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure '''
"to update the config accordingly as leaving `steps_offset` might led to incorrect results"
" in future versions. If you have downloaded this checkpoint from the Hugging Face Hub,"
" it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`"
" file"
)
deprecate("steps_offset!=1" , "1.0.0" , _SCREAMING_SNAKE_CASE , standard_warn=_SCREAMING_SNAKE_CASE )
UpperCamelCase_ = dict(scheduler.config )
UpperCamelCase_ = 1
UpperCamelCase_ = FrozenDict(_SCREAMING_SNAKE_CASE )
if hasattr(scheduler.config , "skip_prk_steps" ) and scheduler.config.skip_prk_steps is False:
            deprecation_message = (
                f'''The configuration file of this scheduler: {scheduler} has not set the configuration'''
                " `skip_prk_steps`. `skip_prk_steps` should be set to True in the configuration file. Please make"
                " sure to update the config accordingly as not setting `skip_prk_steps` in the config might lead to"
                " incorrect results in future versions. If you have downloaded this checkpoint from the Hugging Face"
                " Hub, it would be very nice if you could open a Pull request for the"
                " `scheduler/scheduler_config.json` file"
            )
            deprecate("skip_prk_steps not set" , "1.0.0" , deprecation_message , standard_warn=False )
            new_config = dict(scheduler.config )
            new_config["skip_prk_steps"] = True
            scheduler._internal_dict = FrozenDict(new_config )
if safety_checker is None:
logger.warning(
f'''You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure'''
" that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"
" results in services or applications open to the public. Both the diffusers team and Hugging Face"
" strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"
" it only for use-cases that involve analyzing network behavior or auditing its results. For more"
" information, please have a look at https://github.com/huggingface/diffusers/pull/254 ." )
        self.register_modules(
            segmentation_model=segmentation_model , segmentation_processor=segmentation_processor , vae=vae , text_encoder=text_encoder , tokenizer=tokenizer , unet=unet , scheduler=scheduler , safety_checker=safety_checker , feature_extractor=feature_extractor , )
    def enable_attention_slicing( self: Optional[Any] , slice_size: Optional[Union[str, int]] = "auto" ) -> Any:
        """simple docstring"""
        if slice_size == "auto":
            # half the attention head size is usually a good trade-off between
            # speed and memory
            slice_size = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(slice_size )
    def disable_attention_slicing( self: Dict ) -> Union[str, Any]:
        """simple docstring"""
        self.enable_attention_slicing(None )
    def enable_sequential_cpu_offload( self: str ) -> Union[str, Any]:
        """simple docstring"""
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("Please install accelerate via `pip install accelerate`" )
        device = torch.device("cuda" )
        for cpu_offloaded_model in [self.unet, self.text_encoder, self.vae, self.safety_checker]:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model , device )
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def _execution_device( self: Tuple ) -> List[Any]:
"""simple docstring"""
if self.device != torch.device("meta" ) or not hasattr(self.unet , "_hf_hook" ):
return self.device
for module in self.unet.modules():
if (
hasattr(_SCREAMING_SNAKE_CASE , "_hf_hook" )
and hasattr(module._hf_hook , "execution_device" )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
    def __call__( self: Any , prompt: Union[str, List[str]] , image: Union[torch.FloatTensor, PIL.Image.Image] , text: str , height: int = 512 , width: int = 512 , num_inference_steps: int = 50 , guidance_scale: float = 7.5 , negative_prompt: Optional[Union[str, List[str]]] = None , num_images_per_prompt: Optional[int] = 1 , eta: float = 0.0 , generator: Optional[torch.Generator] = None , latents: Optional[torch.FloatTensor] = None , output_type: Optional[str] = "pil" , return_dict: bool = True , callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None , callback_steps: int = 1 , **kwargs: str , ) -> Tuple:
        """simple docstring"""
        inputs = self.segmentation_processor(
            text=[text] , images=[image] , padding="max_length" , return_tensors="pt" ).to(self.device )
        outputs = self.segmentation_model(**inputs )
        mask = torch.sigmoid(outputs.logits ).cpu().detach().unsqueeze(-1 ).numpy()
        mask_pil = self.numpy_to_pil(mask )[0].resize(image.size )
        # Run inpainting pipeline with the generated mask
        inpainting_pipeline = StableDiffusionInpaintPipeline(
            vae=self.vae , text_encoder=self.text_encoder , tokenizer=self.tokenizer , unet=self.unet , scheduler=self.scheduler , safety_checker=self.safety_checker , feature_extractor=self.feature_extractor , )
        return inpainting_pipeline(
            prompt=prompt , image=image , mask_image=mask_pil , height=height , width=width , num_inference_steps=num_inference_steps , guidance_scale=guidance_scale , negative_prompt=negative_prompt , num_images_per_prompt=num_images_per_prompt , eta=eta , generator=generator , latents=latents , output_type=output_type , return_dict=return_dict , callback=callback , callback_steps=callback_steps , )
| 371
|
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
SPIECE_UNDERLINE = '▁'
VOCAB_FILES_NAMES = {'vocab_file': 'sentencepiece.bpe.model'}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'facebook/nllb-200-distilled-600M': (
'https://huggingface.co/facebook/nllb-200-distilled-600M/blob/main/sentencepiece.bpe.model'
),
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'facebook/nllb-200-distilled-600M': 1_0_2_4,
}
# fmt: off
_UpperCAmelCase = ['ace_Arab', 'ace_Latn', 'acm_Arab', 'acq_Arab', 'aeb_Arab', 'afr_Latn', 'ajp_Arab', 'aka_Latn', 'amh_Ethi', 'apc_Arab', 'arb_Arab', 'ars_Arab', 'ary_Arab', 'arz_Arab', 'asm_Beng', 'ast_Latn', 'awa_Deva', 'ayr_Latn', 'azb_Arab', 'azj_Latn', 'bak_Cyrl', 'bam_Latn', 'ban_Latn', 'bel_Cyrl', 'bem_Latn', 'ben_Beng', 'bho_Deva', 'bjn_Arab', 'bjn_Latn', 'bod_Tibt', 'bos_Latn', 'bug_Latn', 'bul_Cyrl', 'cat_Latn', 'ceb_Latn', 'ces_Latn', 'cjk_Latn', 'ckb_Arab', 'crh_Latn', 'cym_Latn', 'dan_Latn', 'deu_Latn', 'dik_Latn', 'dyu_Latn', 'dzo_Tibt', 'ell_Grek', 'eng_Latn', 'epo_Latn', 'est_Latn', 'eus_Latn', 'ewe_Latn', 'fao_Latn', 'pes_Arab', 'fij_Latn', 'fin_Latn', 'fon_Latn', 'fra_Latn', 'fur_Latn', 'fuv_Latn', 'gla_Latn', 'gle_Latn', 'glg_Latn', 'grn_Latn', 'guj_Gujr', 'hat_Latn', 'hau_Latn', 'heb_Hebr', 'hin_Deva', 'hne_Deva', 'hrv_Latn', 'hun_Latn', 'hye_Armn', 'ibo_Latn', 'ilo_Latn', 'ind_Latn', 'isl_Latn', 'ita_Latn', 'jav_Latn', 'jpn_Jpan', 'kab_Latn', 'kac_Latn', 'kam_Latn', 'kan_Knda', 'kas_Arab', 'kas_Deva', 'kat_Geor', 'knc_Arab', 'knc_Latn', 'kaz_Cyrl', 'kbp_Latn', 'kea_Latn', 'khm_Khmr', 'kik_Latn', 'kin_Latn', 'kir_Cyrl', 'kmb_Latn', 'kon_Latn', 'kor_Hang', 'kmr_Latn', 'lao_Laoo', 'lvs_Latn', 'lij_Latn', 'lim_Latn', 'lin_Latn', 'lit_Latn', 'lmo_Latn', 'ltg_Latn', 'ltz_Latn', 'lua_Latn', 'lug_Latn', 'luo_Latn', 'lus_Latn', 'mag_Deva', 'mai_Deva', 'mal_Mlym', 'mar_Deva', 'min_Latn', 'mkd_Cyrl', 'plt_Latn', 'mlt_Latn', 'mni_Beng', 'khk_Cyrl', 'mos_Latn', 'mri_Latn', 'zsm_Latn', 'mya_Mymr', 'nld_Latn', 'nno_Latn', 'nob_Latn', 'npi_Deva', 'nso_Latn', 'nus_Latn', 'nya_Latn', 'oci_Latn', 'gaz_Latn', 'ory_Orya', 'pag_Latn', 'pan_Guru', 'pap_Latn', 'pol_Latn', 'por_Latn', 'prs_Arab', 'pbt_Arab', 'quy_Latn', 'ron_Latn', 'run_Latn', 'rus_Cyrl', 'sag_Latn', 'san_Deva', 'sat_Beng', 'scn_Latn', 'shn_Mymr', 'sin_Sinh', 'slk_Latn', 'slv_Latn', 'smo_Latn', 'sna_Latn', 'snd_Arab', 'som_Latn', 'sot_Latn', 'spa_Latn', 'als_Latn', 'srd_Latn', 'srp_Cyrl', 'ssw_Latn', 'sun_Latn', 'swe_Latn', 'swh_Latn', 'szl_Latn', 'tam_Taml', 'tat_Cyrl', 'tel_Telu', 'tgk_Cyrl', 'tgl_Latn', 'tha_Thai', 'tir_Ethi', 'taq_Latn', 'taq_Tfng', 'tpi_Latn', 'tsn_Latn', 'tso_Latn', 'tuk_Latn', 'tum_Latn', 'tur_Latn', 'twi_Latn', 'tzm_Tfng', 'uig_Arab', 'ukr_Cyrl', 'umb_Latn', 'urd_Arab', 'uzn_Latn', 'vec_Latn', 'vie_Latn', 'war_Latn', 'wol_Latn', 'xho_Latn', 'ydd_Hebr', 'yor_Latn', 'yue_Hant', 'zho_Hans', 'zho_Hant', 'zul_Latn']
FAIRSEQ_LANGUAGE_CODES = _UpperCAmelCase # alias for the language-code list defined above
class _UpperCamelCase ( PreTrainedTokenizer ):
    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ['''input_ids''', '''attention_mask''']
    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []
    def __init__( self: Tuple , vocab_file: Union[str, Any] , bos_token: List[str]="<s>" , eos_token: Optional[int]="</s>" , sep_token: int="</s>" , cls_token: Union[str, Any]="<s>" , unk_token: Any="<unk>" , pad_token: Union[str, Any]="<pad>" , mask_token: int="<mask>" , tokenizer_file: Dict=None , src_lang: Dict=None , tgt_lang: int=None , sp_model_kwargs: Optional[Dict[str, Any]] = None , additional_special_tokens: int=None , legacy_behaviour: Tuple=False , **kwargs: List[str] , ) -> Tuple:
        """simple docstring"""
        mask_token = AddedToken(mask_token , lstrip=True , rstrip=False ) if isinstance(mask_token , str ) else mask_token
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        self.legacy_behaviour = legacy_behaviour
        super().__init__(
            bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , sep_token=sep_token , cls_token=cls_token , pad_token=pad_token , mask_token=mask_token , tokenizer_file=tokenizer_file , src_lang=src_lang , tgt_lang=tgt_lang , additional_special_tokens=additional_special_tokens , sp_model_kwargs=self.sp_model_kwargs , legacy_behaviour=legacy_behaviour , **kwargs , )
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(str(vocab_file ) )
        self.vocab_file = vocab_file
        # Original fairseq vocab and spm vocab must be "aligned":
        # Vocab    |    0    |    1    |   2    |    3    |  4   |  5   |  6   |  7   |  8   |  9
        # -------- | ------- | ------- | ------ | ------- | ---- | ---- | ---- | ---- | ---- | ----
        # fairseq  | '<s>'   | '<pad>' | '</s>' | '<unk>' | 'an' | '▁n' | '▁m' | '▁t' | '▁k' | '▁a'
        # spm      | '<unk>' | '<s>'   | '</s>' | 'an'    | '▁n' | '▁m' | '▁t' | '▁k' | '▁a' | '▁s'
        # Mimic fairseq token-to-id alignment for the first 4 tokens
        self.fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
        # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
        self.fairseq_offset = 1
        self.sp_model_size = len(self.sp_model )
        self.lang_code_to_id = {
            code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(FAIRSEQ_LANGUAGE_CODES )
        }
        self.id_to_lang_code = {v: k for k, v in self.lang_code_to_id.items()}
        self.fairseq_tokens_to_ids["<mask>"] = len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset
        self.fairseq_tokens_to_ids.update(self.lang_code_to_id )
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
        self._additional_special_tokens = list(self.lang_code_to_id.keys() )
        if additional_special_tokens is not None:
            # Only add those special tokens if they are not already there.
            self._additional_special_tokens.extend(
                [t for t in additional_special_tokens if t not in self._additional_special_tokens] )
        self._src_lang = src_lang if src_lang is not None else "eng_Latn"
        self.cur_lang_code_id = self.lang_code_to_id[self._src_lang]
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self._src_lang )
    def __getstate__( self: Any ) -> Union[str, Any]:
        """simple docstring"""
        state = self.__dict__.copy()
        state["sp_model"] = None
        state["sp_model_proto"] = self.sp_model.serialized_model_proto()
        return state
    def __setstate__( self: List[Any] , d: Optional[Any] ) -> Tuple:
        """simple docstring"""
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self , "sp_model_kwargs" ):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
@property
    def vocab_size( self: Union[str, Any] ) -> Dict:
        """simple docstring"""
        return len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset + 1 # Plus 1 for the mask token
@property
    def src_lang( self: Union[str, Any] ) -> str:
        """simple docstring"""
        return self._src_lang
    @src_lang.setter
    def src_lang( self: Tuple , new_src_lang: str ) -> None:
        """simple docstring"""
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang )
    def get_special_tokens_mask( self: Union[str, Any] , token_ids_a: List[int] , token_ids_b: Optional[List[int]] = None , already_has_special_tokens: bool = False ) -> List[int]:
        """simple docstring"""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_a , token_ids_1=token_ids_b , already_has_special_tokens=already_has_special_tokens )
        prefix_ones = [1] * len(self.prefix_tokens )
        suffix_ones = [1] * len(self.suffix_tokens )
        if token_ids_b is None:
            return prefix_ones + ([0] * len(token_ids_a )) + suffix_ones
        return prefix_ones + ([0] * len(token_ids_a )) + ([0] * len(token_ids_b )) + suffix_ones
    def build_inputs_with_special_tokens( self: Tuple , token_ids_a: List[int] , token_ids_b: Optional[List[int]] = None ) -> List[int]:
        """simple docstring"""
        if token_ids_b is None:
            return self.prefix_tokens + token_ids_a + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_a + token_ids_b + self.suffix_tokens
    def create_token_type_ids_from_sequences( self: str , token_ids_a: List[int] , token_ids_b: Optional[List[int]] = None ) -> List[int]:
        """simple docstring"""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_b is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep + sep + token_ids_b + sep ) * [0]
    def _build_translation_inputs( self: Tuple , raw_inputs: Optional[int] , return_tensors: str , src_lang: Optional[str] , tgt_lang: Optional[str] , **extra_kwargs: Tuple ) -> int:
        """simple docstring"""
        if src_lang is None or tgt_lang is None:
            raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model" )
        self.src_lang = src_lang
        inputs = self(raw_inputs , add_special_tokens=True , return_tensors=return_tensors , **extra_kwargs )
        tgt_lang_id = self.convert_tokens_to_ids(tgt_lang )
        inputs["forced_bos_token_id"] = tgt_lang_id
        return inputs
    def get_vocab( self: Tuple ) -> Union[str, Any]:
        """simple docstring"""
        vocab = {self.convert_ids_to_tokens(i ): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
        return vocab
    def _tokenize( self: Union[str, Any] , text: str ) -> List[str]:
        """simple docstring"""
        return self.sp_model.encode(text , out_type=str )
    def _convert_token_to_id( self: Dict , token: str ) -> Optional[int]:
        """simple docstring"""
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token )
        # Need to return unknown token if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
    def _convert_id_to_token( self: int , index: Union[str, Any] ) -> Any:
        """simple docstring"""
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset )
    def convert_tokens_to_string( self: int , tokens: Optional[int] ) -> Optional[int]:
        """simple docstring"""
        out_string = "".join(tokens ).replace(SPIECE_UNDERLINE , " " ).strip()
        return out_string
    def save_vocabulary( self: str , save_directory: str , filename_prefix: Optional[str] = None ) -> Tuple[str]:
        """simple docstring"""
        if not os.path.isdir(save_directory ):
            logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ) and os.path.isfile(self.vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
        elif not os.path.isfile(self.vocab_file ):
            with open(out_vocab_file , "wb" ) as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model )
        return (out_vocab_file,)
    def prepare_seq2seq_batch( self: Union[str, Any] , src_texts: List[str] , src_lang: str = "eng_Latn" , tgt_texts: Optional[List[str]] = None , tgt_lang: str = "fra_Latn" , **kwargs: List[str] , ) -> BatchEncoding:
        """simple docstring"""
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        return super().prepare_seq2seq_batch(src_texts , tgt_texts , **kwargs )
    def _switch_to_input_mode( self: Any ) -> Optional[int]:
        """simple docstring"""
        return self.set_src_lang_special_tokens(self.src_lang )
    def _switch_to_target_mode( self: Dict ) -> Optional[int]:
        """simple docstring"""
        return self.set_tgt_lang_special_tokens(self.tgt_lang )
    def set_src_lang_special_tokens( self: List[str] , src_lang: Any ) -> None:
        """simple docstring"""
        self.cur_lang_code = self.lang_code_to_id[src_lang]
        if self.legacy_behaviour:
            self.prefix_tokens = []
            self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        else:
            self.prefix_tokens = [self.cur_lang_code]
            self.suffix_tokens = [self.eos_token_id]
    def set_tgt_lang_special_tokens( self: Any , lang: str ) -> None:
        """simple docstring"""
        self.cur_lang_code = self.lang_code_to_id[lang]
        if self.legacy_behaviour:
            self.prefix_tokens = []
            self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        else:
            self.prefix_tokens = [self.cur_lang_code]
            self.suffix_tokens = [self.eos_token_id]
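# Hedged usage sketch (added): the class above mirrors the upstream NllbTokenizer,
# so the standard API applies; the checkpoint id comes from the map above, the
# rest is an assumption for illustration.
#
#   from transformers import NllbTokenizer
#   tok = NllbTokenizer.from_pretrained("facebook/nllb-200-distilled-600M", src_lang="eng_Latn")
#   enc = tok("Hello world!", return_tensors="pt")  # prefix/suffix follow legacy_behaviour
#   forced_bos = tok.lang_code_to_id["fra_Latn"]    # target-language id for generation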
| 371
| 1
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
_lowerCAmelCase = logging.get_logger(__name__)
_lowerCAmelCase = {
"google/bit-50": "https://huggingface.co/google/bit-50/resolve/main/config.json",
}
class __A ( BackboneConfigMixin , PretrainedConfig ):
"""simple docstring"""
    model_type = 'bit'
    layer_types = ['preactivation', 'bottleneck']
    supported_padding = ['SAME', 'VALID']
    def __init__( self , num_channels=3 , embedding_size=6_4 , hidden_sizes=[2_5_6, 5_1_2, 1_0_2_4, 2_0_4_8] , depths=[3, 4, 6, 3] , layer_type="preactivation" , hidden_act="relu" , global_padding=None , num_groups=3_2 , drop_path_rate=0.0 , embedding_dynamic_padding=False , output_stride=3_2 , width_factor=1 , out_features=None , out_indices=None , **kwargs , )-> Dict:
        super().__init__(**kwargs )
        if layer_type not in self.layer_types:
            raise ValueError(f'''layer_type={layer_type} is not one of {','.join(self.layer_types )}''' )
        if global_padding is not None:
            if global_padding.upper() in self.supported_padding:
                global_padding = global_padding.upper()
            else:
                raise ValueError(f'''Padding strategy {global_padding} not supported''' )
        self.num_channels = num_channels
        self.embedding_size = embedding_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.layer_type = layer_type
        self.hidden_act = hidden_act
        self.global_padding = global_padding
        self.num_groups = num_groups
        self.drop_path_rate = drop_path_rate
        self.embedding_dynamic_padding = embedding_dynamic_padding
        self.output_stride = output_stride
        self.width_factor = width_factor
        self.stage_names = ['''stem'''] + [f'''stage{idx}''' for idx in range(1 , len(depths ) + 1 )]
        self._out_features , self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features , out_indices=out_indices , stage_names=self.stage_names )
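# Hedged example (added): the config above mirrors transformers' BitConfig, so an
# equivalent instantiation (import path is an assumption) looks like:
#   from transformers import BitConfig
#   cfg = BitConfig(global_padding="same", out_features=["stage1", "stage4"])
#   print(cfg.global_padding, cfg.out_features, cfg.out_indices)  # SAME ['stage1', 'stage4'] and the aligned indices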
| 161
|
'''simple docstring'''
import fire
from transformers import AutoConfig, AutoModelForSeq2SeqLM, AutoTokenizer
def save_randomly_initialized_version( config_name : str , save_dir : str , **config_kwargs : Tuple ) ->Tuple:
    """simple docstring"""
    config = AutoConfig.from_pretrained(config_name , **config_kwargs )
    model = AutoModelForSeq2SeqLM.from_config(config )
    model.save_pretrained(save_dir )
    AutoTokenizer.from_pretrained(config_name ).save_pretrained(save_dir )
    return model
if __name__ == "__main__":
    fire.Fire(save_randomly_initialized_version)
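# Hedged CLI sketch (added): `fire` exposes the function's parameters as
# command-line arguments, so a typical invocation (paths are placeholders) is:
#   python save_randomly_initialized.py t5-small ./t5-random-init
# which writes an untrained model plus the matching tokenizer to ./t5-random-init.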
| 161
| 1
|
import json
import os
import unittest
from transformers.models.gptsan_japanese.tokenization_gptsan_japanese import (
VOCAB_FILES_NAMES,
GPTSanJapaneseTokenizer,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class a ( TokenizerTesterMixin , unittest.TestCase ):
"""simple docstring"""
    tokenizer_class = GPTSanJapaneseTokenizer
    test_rust_tokenizer = False
    from_pretrained_kwargs = {'''do_clean_text''': False, '''add_prefix_space''': False}
    def setUp( self ) -> int:
super().setUp()
# fmt: off
_A = ["""こん""", """こんに""", """にちは""", """ばんは""", """世界,㔺界""", """、""", """。""", """<BR>""", """<SP>""", """<TAB>""", """<URL>""", """<EMAIL>""", """<TEL>""", """<DATE>""", """<PRICE>""", """<BLOCK>""", """<KIGOU>""", """<U2000U2BFF>""", """<|emoji1|>""", """<unk>""", """<|bagoftoken|>""", """<|endoftext|>"""]
# fmt: on
_A = {"""emoji""": {"""\ud83d\ude00""": """<|emoji1|>"""}, """emoji_inv""": {"""<|emoji1|>""": """\ud83d\ude00"""}} # 😀
_A = {"""unk_token""": """<unk>"""}
_A = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
_A = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""emoji_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )
with open(self.emoji_file , """w""" ) as emoji_writer:
emoji_writer.write(json.dumps(lowerCAmelCase_ ) )
    def get_tokenizer( self , **kwargs ) -> List[str]:
        kwargs.update(self.special_tokens_map )
        return GPTSanJapaneseTokenizer.from_pretrained(self.tmpdirname , **kwargs )
    def get_input_output_texts( self , tokenizer ) -> Tuple:
        input_text = """こんにちは、世界。 \nこんばんは、㔺界。😀"""
        output_text = """こんにちは、世界。 \nこんばんは、世界。😀"""
        return input_text, output_text
    def get_clean_sequence( self , tokenizer ) -> List[Any]:
        input_text , output_text = self.get_input_output_texts(tokenizer )
        ids = tokenizer.encode(output_text , add_special_tokens=False )
        text = tokenizer.decode(ids , clean_up_tokenization_spaces=False )
        return text, ids
def UpperCAmelCase ( self ) -> str:
pass # TODO add if relevant
def UpperCAmelCase ( self ) -> Optional[Any]:
pass # TODO add if relevant
def UpperCAmelCase ( self ) -> Dict:
pass # TODO add if relevant
def UpperCAmelCase ( self ) -> Any:
        tokenizer = self.get_tokenizer()
        # Testing tokenization
        input_text = """こんにちは、世界。 こんばんは、㔺界。"""
        expected_tokens = ["""こん""", """にちは""", """、""", """世界""", """。""", """<SP>""", """こん""", """ばんは""", """、""", """㔺界""", """。"""]
        tokens = tokenizer.tokenize(input_text )
        self.assertListEqual(tokens , expected_tokens )
        # Testing conversion to ids without special tokens
        expected_ids = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6]
        input_ids = tokenizer.convert_tokens_to_ids(tokens )
        self.assertListEqual(input_ids , expected_ids )
        # Testing conversion to ids with special tokens
        input_tokens = tokens + [tokenizer.unk_token]
        expected_ids = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6, 19]
        input_ids = tokenizer.convert_tokens_to_ids(input_tokens )
        self.assertListEqual(input_ids , expected_ids )
def UpperCAmelCase ( self ) -> Dict:
        tokenizer = self.get_tokenizer()
        # Testing tokenization
        input_text = """こんにちは、<|bagoftoken|>世界。こんばんは、<|bagoftoken|>㔺界。"""
        expected_text = """こんにちは、、、、世界。こんばんは、、、、世界。"""
        ids = tokenizer.encode(input_text )
        output_text = tokenizer.decode(ids )
        self.assertEqual(output_text , expected_text )
@slow
def UpperCAmelCase ( self ) -> List[Any]:
        tokenizer = self.tokenizer_class.from_pretrained("""Tanrei/GPTSAN-japanese""" )
        # Testing tokenization
        prefix_text = """こんにちは、世界。"""
        input_text = """こんばんは、㔺界。😀"""
        expected_text = """こんにちは、世界。こんばんは、世界。😀"""
        x_token_1 = tokenizer.encode(prefix_text + input_text )
        x_token_2 = tokenizer.encode("""""" , prefix_text=prefix_text + input_text )
        x_token_3 = tokenizer.encode(input_text , prefix_text=prefix_text )
        decoded_1 = tokenizer.decode(x_token_1 )
        decoded_2 = tokenizer.decode(x_token_2 )
        decoded_3 = tokenizer.decode(x_token_3 )
        self.assertEqual(decoded_1 , expected_text )
        self.assertEqual(decoded_2 , expected_text )
        self.assertEqual(decoded_3 , expected_text )
@slow
def UpperCAmelCase ( self ) -> Tuple:
        tokenizer = self.tokenizer_class.from_pretrained("""Tanrei/GPTSAN-japanese""" )
        # Testing tokenization
        prefix_text = """こんにちは、世界。"""
        input_text = """こんばんは、㔺界。😀"""
        len_prefix = len(tokenizer.encode(prefix_text ) ) - 2
        len_text = len(tokenizer.encode(input_text ) ) - 2
        expected_mask_1 = [1] + [0] * (len_prefix + len_text + 1)
        expected_mask_2 = [1] * (len_prefix + len_text + 1) + [0]
        expected_mask_3 = [1] + [1] * (len_prefix) + [0] * (len_text + 1)
        type_ids_1 = tokenizer(prefix_text + input_text ).token_type_ids
        type_ids_2 = tokenizer("""""" , prefix_text=prefix_text + input_text ).token_type_ids
        type_ids_3 = tokenizer(input_text , prefix_text=prefix_text ).token_type_ids
        self.assertListEqual(type_ids_1 , expected_mask_1 )
        self.assertListEqual(type_ids_2 , expected_mask_2 )
        self.assertListEqual(type_ids_3 , expected_mask_3 )
@slow
def UpperCAmelCase ( self ) -> List[Any]:
        tokenizer = self.tokenizer_class.from_pretrained("""Tanrei/GPTSAN-japanese""" )
        x_token_1 = tokenizer.encode("""あンいワ""" )
        x_token_2 = tokenizer.encode("""""" , prefix_text="""あンいワ""" )
        x_token_3 = tokenizer.encode("""いワ""" , prefix_text="""あン""" )
        self.assertEqual(tokenizer.decode(x_token_1 ) , tokenizer.decode(x_token_2 ) )
        self.assertEqual(tokenizer.decode(x_token_1 ) , tokenizer.decode(x_token_3 ) )
        self.assertNotEqual(x_token_1 , x_token_2 )
        self.assertNotEqual(x_token_1 , x_token_3 )
        self.assertEqual(x_token_2[1] , x_token_2[-1] ) # SEG token
        self.assertEqual(x_token_3[1] , x_token_3[3] ) # SEG token
@slow
def UpperCAmelCase ( self ) -> str:
        tokenizer = self.tokenizer_class.from_pretrained("""Tanrei/GPTSAN-japanese""" )
        input_pairs = [["""武田信玄""", """は、"""], ["""織田信長""", """の配下の、"""]]
        x_token = tokenizer(input_pairs , padding=True )
        x_token_a = tokenizer.batch_encode_plus(input_pairs , padding=True )
        # fmt: off
        expected_outputs = [[3_59_93, 86_40, 2_59_48, 3_59_98, 3_06_47, 3_56_75, 3_59_99, 3_59_99], [3_59_93, 1_03_82, 98_68, 3_59_98, 3_06_46, 94_59, 3_06_46, 3_56_75]]
        expected_typeids = [[1, 1, 1, 0, 0, 0, 0, 0], [1, 1, 1, 0, 0, 0, 0, 0]]
        expected_attmask = [[1, 1, 1, 1, 1, 1, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1]]
        # fmt: on
        self.assertListEqual(x_token.input_ids , expected_outputs )
        self.assertListEqual(x_token.token_type_ids , expected_typeids )
        self.assertListEqual(x_token.attention_mask , expected_attmask )
        self.assertListEqual(x_token_a.input_ids , expected_outputs )
        self.assertListEqual(x_token_a.token_type_ids , expected_typeids )
        self.assertListEqual(x_token_a.attention_mask , expected_attmask )
def UpperCAmelCase ( self ) -> List[Any]:
# Intentionally convert some words to accommodate character fluctuations unique to Japanese
pass
def UpperCAmelCase ( self ) -> Dict:
# tokenizer has no padding token
pass
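    # Hedged note (added): the tests above exercise GPT-SAN's two-segment input
    # scheme, where `prefix_text` tokens receive token_type_id 1, the
    # continuation receives 0, and a SEG token marks the boundary between them.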
| 716
|
import numpy as np
import qiskit
def bbaa(key_len: int = 8 , seed: int | None = None) -> str:
    rng = np.random.default_rng(seed=seed)
    # Roughly 25% of the qubits will contribute to the key.
    # So we take more than we need.
    num_qubits = 6 * key_len
    # Measurement basis for Alice's qubits.
    alice_basis = rng.integers(2 , size=num_qubits)
    # The set of states Alice will prepare.
    alice_state = rng.integers(2 , size=num_qubits)
    # Measurement basis for Bob's qubits.
    bob_basis = rng.integers(2 , size=num_qubits)
    # Quantum Circuit to simulate BB84
    bbaa_circ = qiskit.QuantumCircuit(num_qubits , name="""BB84""")
    # Alice prepares her qubits according to rules above.
    for index, _ in enumerate(alice_basis):
        if alice_state[index] == 1:
            bbaa_circ.x(index)
        if alice_basis[index] == 1:
            bbaa_circ.h(index)
    bbaa_circ.barrier()
    # Bob measures the received qubits according to rules above.
    for index, _ in enumerate(bob_basis):
        if bob_basis[index] == 1:
            bbaa_circ.h(index)
    bbaa_circ.barrier()
    bbaa_circ.measure_all()
    # Simulate the quantum circuit.
    sim = qiskit.Aer.get_backend("""aer_simulator""")
    # We only need to run one shot because the key is unique.
    # Multiple shots will produce the same key.
    job = qiskit.execute(bbaa_circ , sim , shots=1 , seed_simulator=seed)
    # Returns the result of measurement.
    result = job.result().get_counts(bbaa_circ).most_frequent()
    # Extracting the generated key from the simulation results.
    # Only keep measurement results where Alice and Bob chose the same basis.
    gen_key = """""".join(
        [
            result_bit
            for alice_basis_bit, bob_basis_bit, result_bit in zip(
                alice_basis , bob_basis , result)
            if alice_basis_bit == bob_basis_bit
        ])
    # Get final key. Pad with 0 if too short, otherwise truncate.
    key = gen_key[:key_len] if len(gen_key) >= key_len else gen_key.ljust(key_len , """0""")
    return key
if __name__ == "__main__":
    print(F'''The generated key is : {bbaa(8, seed=0)}''')
from doctest import testmod
testmod()
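    # Hedged sanity check (added) of the sifting step above: bits are kept only
    # where Alice's and Bob's randomly chosen bases agree (about half of them).
    demo_alice = [0, 1, 1, 0]
    demo_bob = [0, 0, 1, 1]
    demo_bits = "1011"
    sifted = "".join(bit for a_b, b_b, bit in zip(demo_alice, demo_bob, demo_bits) if a_b == b_b)
    print(f"sifted demo key: {sifted}")  # keeps positions 0 and 2 -> '11'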
| 83
| 0
|
'''simple docstring'''
def molarity_to_normality(nfactor : int , moles : float , volume : float ):
    return round(float(moles / volume ) * nfactor )
def moles_to_pressure(volume : float , moles : float , temperature : float ):
    return round(float((moles * 0.0_8_2_1 * temperature) / (volume) ) )
def moles_to_volume(pressure : float , moles : float , temperature : float ):
    return round(float((moles * 0.0_8_2_1 * temperature) / (pressure) ) )
def pressure_and_volume_to_temperature(pressure : float , volume : float , moles : float ):
    return round(float((pressure * volume) / (0.0_8_2_1 * moles) ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
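    # Worked example (added): 2 mol of an ideal gas at 300 K in a 10 L vessel
    # gives P = nRT / V = (2 * 0.0821 * 300) / 10 ≈ 4.93 atm, reported as 5
    # because of the rounding in moles_to_pressure.
    print(moles_to_pressure(volume=10, moles=2, temperature=300))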
| 525
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import _LazyModule
lowerCAmelCase = {"""tokenization_wav2vec2_phoneme""": ["""Wav2Vec2PhonemeCTCTokenizer"""]}
if TYPE_CHECKING:
    from .tokenization_wav2vec2_phoneme import Wav2Vec2PhonemeCTCTokenizer
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 525
| 1
|
'''simple docstring'''
from math import ceil
from typing import List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import BatchFeature, SequenceFeatureExtractor
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)
class lowerCAmelCase ( SequenceFeatureExtractor ):
    model_input_names = ["audio_values", "audio_mask"]
    def __init__( self : Tuple , spectrogram_length : str=2048 , num_channels : str=1 , patch_size : Any=[16, 16] , feature_size : List[str]=128 , sampling_rate : Tuple=4_4100 , hop_length_to_sampling_rate : Dict=86 , n_fft : List[str]=2048 , padding_value : int=0.0 , **kwargs : Optional[Any] , ):
        '''simple docstring'''
        super().__init__(
            feature_size=feature_size , sampling_rate=sampling_rate , padding_value=padding_value , **kwargs , )
        self.spectrogram_length = spectrogram_length
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.freq_len = feature_size // self.patch_size[1]
        self.n_fft = n_fft
        self.hop_length = sampling_rate // hop_length_to_sampling_rate
        self.sampling_rate = sampling_rate
        self.padding_value = padding_value
        self.mel_filters = mel_filter_bank(
            num_frequency_bins=1 + n_fft // 2 , num_mel_filters=feature_size , min_frequency=0.0 , max_frequency=22050.0 , sampling_rate=sampling_rate , norm="slaney" , mel_scale="slaney" , ).T
    def _np_extract_fbank_features( self : Optional[int] , waveform : np.array ):
        '''simple docstring'''
        log_spec = spectrogram(
            waveform , window_function(self.n_fft , "hann" ) , frame_length=self.n_fft , hop_length=self.hop_length , power=2.0 , mel_filters=self.mel_filters.T , log_mel="dB" , db_range=80.0 , )
        log_spec = log_spec[:, :-1]
        log_spec = log_spec - 20.0
        log_spec = np.clip(log_spec / 40.0 , -2.0 , 0.0 ) + 1.0
        return log_spec
    def __call__( self : Dict , raw_speech : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , return_tensors : Optional[Union[str, TensorType]] = None , return_attention_mask : Optional[bool] = True , sampling_rate : Optional[int] = None , resample : bool = False , mask_audio : bool = False , **kwargs : Optional[int] , ):
        '''simple docstring'''
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    "This feature extractor is set to support sampling rate"
                    F''' of {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled'''
                    F''' with {self.sampling_rate} and not {sampling_rate}.''' )
        else:
            logger.warning(
                "It is strongly recommended to pass the `sampling_rate` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug." )
        is_batched_numpy = isinstance(raw_speech , np.ndarray ) and len(raw_speech.shape ) > 1
        if is_batched_numpy and len(raw_speech.shape ) > 2:
            raise ValueError(F'''Only mono-channel audio is supported for input to {self}''' )
        is_batched = is_batched_numpy or (
            isinstance(raw_speech , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
        )
        if is_batched:
            raw_speech = [np.asarray([speech] , dtype=np.float32 ).T for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech , np.ndarray ):
            raw_speech = np.asarray(raw_speech , dtype=np.float32 )
        elif isinstance(raw_speech , np.ndarray ) and raw_speech.dtype is np.dtype(np.float64 ):
            raw_speech = raw_speech.astype(np.float32 )
        # always return batch
        if not is_batched:
            raw_speech = [np.asarray([raw_speech] ).T]
        # Convert audio signals to log mel spectrograms, truncate by time axis
        audio_features = [
            self._np_extract_fbank_features(waveform.squeeze() ).T[: self.spectrogram_length] for waveform in raw_speech
        ]
        if isinstance(audio_features[0] , list ):
            audio_features = [np.asarray(feature , dtype=np.float32 ) for feature in audio_features]
        # Create audio attention mask
        max_patch_len = max(
            [ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len for feature in audio_features] ) # The maximum number of audio patches in a batch
        if return_attention_mask:
            audio_mask = [
                (ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [1]
                + (max_patch_len - ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [0]
                for feature in audio_features
            ]
            audio_mask = np.array(audio_mask ).astype(np.float32 )
        # convert into correct format for padding
        max_time_len = max_patch_len // self.freq_len * self.patch_size[0] # The maximum audio size in a batch
        padded_audio_features = np.ones([len(audio_features ), 1, max_time_len, self.feature_size] ).astype(np.float32 )
        padded_audio_features = padded_audio_features * self.padding_value
        for i in range(len(audio_features ) ):
            feature = audio_features[i]
            padded_audio_features[i, :, : feature.shape[0], :] = feature
        # return as BatchFeature
        if return_attention_mask:
            data = {"audio_values": padded_audio_features, "audio_mask": audio_mask}
        else:
            data = {"audio_values": padded_audio_features}
        encoded_inputs = BatchFeature(data=data , tensor_type=return_tensors )
        return encoded_inputs
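# Hedged usage sketch (added): the extractor above mirrors the upstream
# TvltFeatureExtractor; the import path and defaults are assumptions.
#   import numpy as np
#   from transformers import TvltFeatureExtractor
#   fe = TvltFeatureExtractor()
#   wave = np.zeros(44100, dtype=np.float32)  # one second of silence at 44.1 kHz
#   out = fe(wave, sampling_rate=44100, return_tensors="np")
#   print(out["audio_values"].shape, out["audio_mask"].shape)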
| 706
|
'''simple docstring'''
import logging
import re
import pytorch_quantization
import pytorch_quantization.nn as quant_nn
import torch
from pytorch_quantization import calib
from pytorch_quantization.tensor_quant import QuantDescriptor
logger = logging.getLogger(__name__)
name_width = 50 # max width of layer names
qname_width = 70 # max width of quantizer names
def UpperCAmelCase_ ( parser ):
    """simple docstring"""
    group = parser.add_argument_group("quant_trainer arguments" )
    group.add_argument("--wprec" , type=int , default=8 , help="weight precision" )
    group.add_argument("--aprec" , type=int , default=8 , help="activation precision" )
    group.add_argument("--quant-per-tensor" , action="store_true" , help="per tensor weight scaling" )
    group.add_argument("--quant-disable" , action="store_true" , help="disable all quantizers" )
    group.add_argument("--quant-disable-embeddings" , action="store_true" , help="disable all embeddings quantizers" )
    group.add_argument("--quant-disable-keyword" , type=str , nargs="+" , help="disable quantizers by keyword" )
    group.add_argument("--quant-disable-layer-module" , type=str , help="disable quantizers by keyword under layer." )
    group.add_argument("--quant-enable-layer-module" , type=str , help="enable quantizers by keyword under layer" )
    group.add_argument("--calibrator" , default="max" , help="which quantization range calibrator to use" )
    group.add_argument("--percentile" , default=None , type=float , help="percentile for PercentileCalibrator" )
    group.add_argument("--fuse-qkv" , action="store_true" , help="use the same scale factor for qkv" )
    group.add_argument("--clip-gelu" , metavar="N" , type=float , help="clip gelu output maximum value to N" )
    group.add_argument(
        "--recalibrate-weights" , action="store_true" , help=(
            "recalibrate weight amaxes by taking the max of the weights."
            " amaxes will be computed with the current quantization granularity (axis)."
        ) , )
def UpperCAmelCase_ ( args ):
    """simple docstring"""
    if args.calibrator == "max":
        calib_method = "max"
    elif args.calibrator == "percentile":
        if args.percentile is None:
            raise ValueError("Specify --percentile when using percentile calibrator" )
        calib_method = "histogram"
    elif args.calibrator == "mse":
        calib_method = "histogram"
    else:
        raise ValueError(f'''Invalid calibrator {args.calibrator}''' )
    input_desc = QuantDescriptor(num_bits=args.aprec , calib_method=calib_method )
    weight_desc = QuantDescriptor(num_bits=args.wprec , axis=(None if args.quant_per_tensor else (0,)) )
    quant_nn.QuantLinear.set_default_quant_desc_input(input_desc )
    quant_nn.QuantLinear.set_default_quant_desc_weight(weight_desc )
def UpperCAmelCase_ ( model , args , calib=False , eval=False ):
    """simple docstring"""
    logger.info("Configuring Model for Quantization" )
    logger.info(f'''using quantization package {pytorch_quantization.__file__}''' )
    if not calib:
        if args.quant_disable_embeddings:
            set_quantizer_by_name(model , ["embeddings"] , which="weight" , _disabled=True )
        if args.quant_disable:
            set_quantizer_by_name(model , [""] , _disabled=True )
        if args.quant_disable_keyword:
            set_quantizer_by_name(model , args.quant_disable_keyword , _disabled=True )
        if args.quant_disable_layer_module:
            set_quantizer_by_name(model , [R"layer.\d+." + args.quant_disable_layer_module] , _disabled=True )
        if args.quant_enable_layer_module:
            set_quantizer_by_name(model , [R"layer.\d+." + args.quant_enable_layer_module] , _disabled=False )
        if args.recalibrate_weights:
            recalibrate_weights(model )
        if args.fuse_qkv:
            fuse_qkv(model , args )
        if args.clip_gelu:
            clip_gelu(model , args.clip_gelu )
    # if args.local_rank in [-1, 0] and not calib:
    print_quant_summary(model )
def UpperCAmelCase_ ( model ):
    """simple docstring"""
    logger.info("Enabling Calibration" )
    for name, module in model.named_modules():
        if name.endswith("_quantizer" ):
            if module._calibrator is not None:
                module.disable_quant()
                module.enable_calib()
            else:
                module.disable()
            logger.info(f'''{name:80}: {module}''' )
def UpperCAmelCase_ ( model , args ):
    """simple docstring"""
    logger.info("Loading calibrated amax" )
    for name, module in model.named_modules():
        if name.endswith("_quantizer" ):
            if module._calibrator is not None:
                if isinstance(module._calibrator , calib.MaxCalibrator ):
                    module.load_calib_amax()
                else:
                    module.load_calib_amax("percentile" , percentile=args.percentile )
                module.enable_quant()
                module.disable_calib()
            else:
                module.enable()
    model.cuda()
    print_quant_summary(model )
def fuse_qkv(model , args ):
    """simple docstring"""
    def fusea(qq , qk , qv ):
        for mod in [qq, qk, qv]:
            if not hasattr(mod , "_amax" ):
                print("          WARNING: NO AMAX BUFFER" )
                return
        q = qq._amax.detach().item()
        k = qk._amax.detach().item()
        v = qv._amax.detach().item()
        amax = max(q , k , v )
        qq._amax.fill_(amax )
        qk._amax.fill_(amax )
        qv._amax.fill_(amax )
        logger.info(f'''          q={q:5.2f} k={k:5.2f} v={v:5.2f} -> {amax:5.2f}''' )
    for name, mod in model.named_modules():
        if name.endswith(".attention.self" ):
            logger.info(f'''FUSE_QKV: {name:{name_width}}''' )
            fusea(mod.matmul_q_input_quantizer , mod.matmul_k_input_quantizer , mod.matmul_v_input_quantizer )
            if args.quant_per_tensor:
                fusea(mod.query._weight_quantizer , mod.key._weight_quantizer , mod.value._weight_quantizer )
def clip_gelu(model , maxval ):
    """simple docstring"""
    for name, mod in model.named_modules():
        if name.endswith(".output.dense" ) and not name.endswith("attention.output.dense" ):
            amax_init = mod._input_quantizer._amax.data.detach().item()
            mod._input_quantizer._amax.data.detach().clamp_(max=maxval )
            amax = mod._input_quantizer._amax.data.detach().item()
            logger.info(f'''CLIP_GELU: {name:{name_width}} amax: {amax_init:5.2f} -> {amax:5.2f}''' )
def UpperCAmelCase_ ( model ):
    """simple docstring"""
    for name, mod in model.named_modules():
        if hasattr(mod , "_weight_quantizer" ) and mod._weight_quantizer.axis is not None:
            k = mod.weight.shape[0]
            amax = mod._weight_quantizer._amax.detach()
            mod._weight_quantizer._amax = torch.ones(k , dtype=amax.dtype , device=amax.device ) * amax
            print(f'''expanding {name} {amax} -> {mod._weight_quantizer._amax}''' )
def recalibrate_weights(model ):
    """simple docstring"""
    for name, mod in model.named_modules():
        if hasattr(mod , "_weight_quantizer" ):
            if not hasattr(mod._weight_quantizer , "_amax" ):
                print(f"RECALIB: {name:{name_width}} WARNING: NO AMAX BUFFER" )
                continue
            # determine which axes to reduce across
            # e.g. a 4D tensor quantized per axis 0 should reduce over (1,2,3)
            axis_set = set() if mod._weight_quantizer.axis is None else set(mod._weight_quantizer.axis )
            reduce_axis = set(range(len(mod.weight.size() ) ) ) - axis_set
            amax = pytorch_quantization.utils.reduce_amax(mod.weight , axis=reduce_axis , keepdims=True ).detach()
            logger.info(f'''RECALIB: {name:{name_width}} {mod._weight_quantizer._amax.flatten()} -> {amax.flatten()}''' )
            mod._weight_quantizer._amax = amax
def UpperCAmelCase_ ( model , name_width=2_5 , line_width=1_8_0 , ignore=None ):
    """simple docstring"""
    if ignore is None:
        ignore = []
    elif not isinstance(ignore , list ):
        ignore = [ignore]
    name_width = 0
    for name, mod in model.named_modules():
        if not hasattr(mod , "weight" ):
            continue
        name_width = max(name_width , len(name ) )
    for name, mod in model.named_modules():
        input_q = getattr(mod , "_input_quantizer" , None )
        weight_q = getattr(mod , "_weight_quantizer" , None )
        if not hasattr(mod , "weight" ):
            continue
        if type(mod ) in ignore:
            continue
        if [True for s in ignore if type(s ) is str and s in name]:
            continue
        act_str = f'''Act:{input_q.extra_repr()}'''
        wgt_str = f'''Wgt:{weight_q.extra_repr()}'''
        s = f'''{name:{name_width}} {act_str} {wgt_str}'''
        if len(s ) <= line_width:
            logger.info(s )
        else:
            logger.info(f'''{name:{name_width}} {act_str}''' )
            logger.info(f'''{' ':{name_width}} {wgt_str}''' )
def print_quant_summary(model ):
    """simple docstring"""
    count = 0
    for name, mod in model.named_modules():
        if isinstance(mod , pytorch_quantization.nn.TensorQuantizer ):
            print(f'''{name:80} {mod}''' )
            count += 1
    print(f'''{count} TensorQuantizers found in model''' )
def set_quantizer(name , mod , quantizer , k , v ):
    """simple docstring"""
    quantizer_mod = getattr(mod , quantizer , None )
    if quantizer_mod is not None:
        assert hasattr(quantizer_mod , k )
        setattr(quantizer_mod , k , v )
    else:
        logger.warning(f'''{name} has no {quantizer}''' )
def UpperCAmelCase_ ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_="both" , **lowerCamelCase_ ):
"""simple docstring"""
lowerCAmelCase__ : Tuple = f'''Warning: changing {which} quantizers of {name:{qname_width}}'''
for k, v in kwargs.items():
s += f''' {k}={v}'''
if which in ["input", "both"]:
set_quantizer(lowerCamelCase_ , lowerCamelCase_ , "_input_quantizer" , lowerCamelCase_ , lowerCamelCase_ )
if which in ["weight", "both"]:
set_quantizer(lowerCamelCase_ , lowerCamelCase_ , "_weight_quantizer" , lowerCamelCase_ , lowerCamelCase_ )
logger.info(lowerCamelCase_ )
def set_quantizer_by_name(model , names , **kwargs ):
    """simple docstring"""
    for name, mod in model.named_modules():
        if hasattr(mod , "_input_quantizer" ) or hasattr(mod , "_weight_quantizer" ):
            for n in names:
                if re.search(n , name ):
                    set_quantizers(name , mod , **kwargs )
        elif name.endswith("_quantizer" ):
            for n in names:
                if re.search(n , name ):
                    s = f'''Warning: changing {name:{name_width}}'''
                    for k, v in kwargs.items():
                        s += f''' {k}={v}'''
                        setattr(mod , k , v )
                    logger.info(s )
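# Hedged usage note (added): upstream, these helpers are wired into a training
# script roughly in this order (some entry-point names above remain obfuscated,
# so treat the order, not the identifiers, as the takeaway):
#   1. register the quantization CLI flags on the argument parser,
#   2. set the default QuantDescriptor for inputs/weights before model creation,
#   3. configure the model's quantizers (optionally in calibration mode),
#   4. run a few batches with calibration enabled, then load the collected
#      amax values and re-enable quantization for QAT or evaluation.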
| 568
| 0
|
'''simple docstring'''
def count_inversions_bf(arr ):
    '''simple docstring'''
    num_inversions = 0
    n = len(arr )
    for i in range(n - 1 ):
        for j in range(i + 1 , n ):
            if arr[i] > arr[j]:
                num_inversions += 1
    return num_inversions
def count_inversions_recursive(arr ):
    '''simple docstring'''
    if len(arr ) <= 1:
        return arr, 0
    mid = len(arr ) // 2
    p = arr[0:mid]
    q = arr[mid:]
    sorted_p , inversion_p = count_inversions_recursive(p )
    sorted_q , inversions_q = count_inversions_recursive(q )
    merged , cross_inversions = _count_cross_inversions(sorted_p , sorted_q )
    num_inversions = inversion_p + inversions_q + cross_inversions
    return merged, num_inversions
def _count_cross_inversions(p , q ):
    '''simple docstring'''
    r = []
    i = j = num_inversion = 0
    while i < len(p ) and j < len(q ):
        if p[i] > q[j]:
            # if P[i] > Q[j], then P[k] > Q[j] for all i < k <= len(P)
            # These are all inversions. The claim emerges from the
            # property that P is sorted.
            num_inversion += len(p ) - i
            r.append(q[j] )
            j += 1
        else:
            r.append(p[i] )
            i += 1
    if i < len(p ):
        r.extend(p[i:] )
    else:
        r.extend(q[j:] )
    return r, num_inversion
def main():
    '''simple docstring'''
    arr_a = [10, 2, 1, 5, 5, 2, 11]
    # this arr has 8 inversions:
    # (10, 2), (10, 1), (10, 5), (10, 5), (10, 2), (2, 1), (5, 2), (5, 2)
    num_inversions_bf = count_inversions_bf(arr_a )
    _ , num_inversions_recursive = count_inversions_recursive(arr_a )
    assert num_inversions_bf == num_inversions_recursive == 8
    print('''number of inversions = ''' , num_inversions_bf )
    # testing an array with zero inversion (a sorted arr_1)
    arr_a.sort()
    num_inversions_bf = count_inversions_bf(arr_a )
    _ , num_inversions_recursive = count_inversions_recursive(arr_a )
    assert num_inversions_bf == num_inversions_recursive == 0
    print('''number of inversions = ''' , num_inversions_bf )
    # an empty list should also have zero inversions
    arr_a = []
    num_inversions_bf = count_inversions_bf(arr_a )
    _ , num_inversions_recursive = count_inversions_recursive(arr_a )
    assert num_inversions_bf == num_inversions_recursive == 0
    print('''number of inversions = ''' , num_inversions_bf )
if __name__ == "__main__":
    main()
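# Complexity note (added): count_inversions_bf is O(n^2), while the merge-sort
# based count_inversions_recursive is O(n log n) because each merge counts all
# cross inversions between the two sorted halves in linear time.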
| 422
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowercase : Optional[int] = logging.get_logger(__name__)
__lowercase : Optional[int] = {
'''studio-ousia/luke-base''': '''https://huggingface.co/studio-ousia/luke-base/resolve/main/config.json''',
'''studio-ousia/luke-large''': '''https://huggingface.co/studio-ousia/luke-large/resolve/main/config.json''',
}
class __lowercase ( PretrainedConfig ):
lowerCamelCase : List[str] = "luke"
    def __init__(self , vocab_size=5_0_2_6_7 , entity_vocab_size=5_0_0_0_0_0 , hidden_size=7_6_8 , entity_emb_size=2_5_6 , num_hidden_layers=1_2 , num_attention_heads=1_2 , intermediate_size=3_0_7_2 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=5_1_2 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1E-12 , use_entity_aware_attention=True , classifier_dropout=None , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , **kwargs , ):
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.entity_vocab_size = entity_vocab_size
        self.hidden_size = hidden_size
        self.entity_emb_size = entity_emb_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_entity_aware_attention = use_entity_aware_attention
        self.classifier_dropout = classifier_dropout
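# Hedged example (added): the class above mirrors transformers' LukeConfig, so
# an equivalent instantiation (import path assumed) looks like:
#   from transformers import LukeConfig
#   cfg = LukeConfig(entity_emb_size=256, use_entity_aware_attention=True)
#   print(cfg.entity_vocab_size, cfg.hidden_size)  # 500000 768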
| 422
| 1
|
'''simple docstring'''
from typing import Any, Dict, List, Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from ..image_utils import load_image
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_OBJECT_DETECTION_MAPPING, MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING
UpperCamelCase_ = logging.get_logger(__name__)
Prediction = Dict[str, Any]
Predictions = List[Prediction]
@add_end_docstrings(PIPELINE_INIT_ARGS )
class __SCREAMING_SNAKE_CASE ( Pipeline ):
    def __init__( self : Any , *args : Any , **kwargs : Optional[int] ):
        '''simple docstring'''
        super().__init__(*args , **kwargs )
if self.framework == "tf":
raise ValueError(F'''The {self.__class__} is only available in PyTorch.''' )
requires_backends(self , '''vision''' )
self.check_model_type(
dict(MODEL_FOR_OBJECT_DETECTION_MAPPING.items() + MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING.items() ) )
    def _sanitize_parameters( self : Dict , **kwargs : int ):
        '''simple docstring'''
        postprocess_kwargs ={}
        if "threshold" in kwargs:
            postprocess_kwargs['''threshold'''] =kwargs['''threshold''']
        return {}, {}, postprocess_kwargs
    def __call__( self : Tuple , *args : int , **kwargs : Union[str, Any] ):
        '''simple docstring'''
        return super().__call__(*args , **kwargs )
    def preprocess( self : List[Any] , image : Optional[Any] ):
        '''simple docstring'''
        image =load_image(image )
        target_size =torch.IntTensor([[image.height, image.width]] )
        inputs =self.image_processor(images=[image] , return_tensors='''pt''' )
        if self.tokenizer is not None:
            inputs =self.tokenizer(text=inputs['''words'''] , boxes=inputs['''boxes'''] , return_tensors='''pt''' )
        inputs['''target_size'''] =target_size
        return inputs
    def _forward( self : Dict , model_inputs : Optional[int] ):
        '''simple docstring'''
        target_size =model_inputs.pop('''target_size''' )
        outputs =self.model(**model_inputs )
        model_outputs =outputs.__class__({'''target_size''': target_size, **outputs} )
        if self.tokenizer is not None:
            model_outputs['''bbox'''] =model_inputs['''bbox''']
        return model_outputs
    def postprocess( self : List[str] , model_outputs : Any , threshold : Dict=0.9 ):
        '''simple docstring'''
        target_size =model_outputs['''target_size''']
        if self.tokenizer is not None:
            # This is a LayoutLMForTokenClassification variant.
            # The OCR got the boxes and the model classified the words.
            height , width =target_size[0].tolist()
            def unnormalize(bbox : Union[str, Any] ):
                return self._get_bounding_box(
                    torch.Tensor(
                        [
                            (width * bbox[0] / 1000),
                            (height * bbox[1] / 1000),
                            (width * bbox[2] / 1000),
                            (height * bbox[3] / 1000),
                        ] ) )
            scores , classes =model_outputs['''logits'''].squeeze(0 ).softmax(dim=-1 ).max(dim=-1 )
            labels =[self.model.config.id2label[prediction] for prediction in classes.tolist()]
            boxes =[unnormalize(bbox ) for bbox in model_outputs['''bbox'''].squeeze(0 )]
            keys =['''score''', '''label''', '''box''']
            annotation =[dict(zip(keys , vals ) ) for vals in zip(scores.tolist() , labels , boxes ) if vals[0] > threshold]
        else:
            # This is a regular ForObjectDetectionModel
            raw_annotations =self.image_processor.post_process_object_detection(model_outputs , threshold , target_size )
            raw_annotation =raw_annotations[0]
            scores =raw_annotation['''scores''']
            labels =raw_annotation['''labels''']
            boxes =raw_annotation['''boxes''']
            raw_annotation['''scores'''] =scores.tolist()
            raw_annotation['''labels'''] =[self.model.config.id2label[label.item()] for label in labels]
            raw_annotation['''boxes'''] =[self._get_bounding_box(box ) for box in boxes]
            # {"scores": [...], ...} --> [{"score":x, ...}, ...]
            keys =['''score''', '''label''', '''box''']
            annotation =[
                dict(zip(keys , vals ) )
                for vals in zip(raw_annotation['''scores'''] , raw_annotation['''labels'''] , raw_annotation['''boxes'''] )
            ]
        return annotation
def lowerCamelCase_ ( self : Any , UpperCAmelCase__ : "torch.Tensor" ):
'''simple docstring'''
if self.framework != "pt":
raise ValueError('''The ObjectDetectionPipeline is only available in PyTorch.''' )
lowercase , lowercase , lowercase , lowercase : Union[str, Any] =box.int().tolist()
lowercase : Optional[Any] ={
'''xmin''': xmin,
'''ymin''': ymin,
'''xmax''': xmax,
'''ymax''': ymax,
}
return bbox
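# Hedged usage sketch (added): this pipeline is normally reached through the
# high-level factory; the model id is an assumption for illustration.
#   from transformers import pipeline
#   detector = pipeline("object-detection", model="facebook/detr-resnet-50")
#   preds = detector("http://images.cocodataset.org/val2017/000000039769.jpg", threshold=0.9)
#   # each entry: {"score": float, "label": str, "box": {"xmin", "ymin", "xmax", "ymax"}},
#   # matching the keys assembled in postprocess() above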
| 88
|
'''simple docstring'''
import unittest
from datasets import load_dataset
from transformers import BloomTokenizerFast
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class __SCREAMING_SNAKE_CASE ( TokenizerTesterMixin , unittest.TestCase ):
    slow_tokenizer_class = None
    rust_tokenizer_class = BloomTokenizerFast
    tokenizer_class = BloomTokenizerFast
    test_rust_tokenizer = True
    test_slow_tokenizer = False
    from_pretrained_vocab_key = 'tokenizer_file'
    special_tokens_map = {'bos_token': '<s>', 'eos_token': '</s>', 'unk_token': '<unk>', 'pad_token': '<pad>'}
    def setUp( self : List[str] ):
        '''simple docstring'''
        super().setUp()
        tokenizer =BloomTokenizerFast.from_pretrained('''bigscience/tokenizer''' )
        tokenizer.save_pretrained(self.tmpdirname )
    def get_rust_tokenizer( self : Union[str, Any] , **kwargs : Any ):
        '''simple docstring'''
        kwargs.update(self.special_tokens_map )
        return BloomTokenizerFast.from_pretrained(self.tmpdirname , **kwargs )
def lowerCamelCase_ ( self : Any ):
'''simple docstring'''
        tokenizer =self.get_rust_tokenizer()
        input_sentences =['''The quick brown fox</s>''', '''jumps over the lazy dog</s>''']
        target_ids =[[2175, 23714, 73173, 144252, 2], [77, 132619, 3478, 368, 109586, 35433, 2]]
        computed_ids =tokenizer.batch_encode_plus(input_sentences )['''input_ids''']
        self.assertListEqual(target_ids , computed_ids )
        decoded_sentences =tokenizer.batch_decode(computed_ids )
        self.assertListEqual(decoded_sentences , input_sentences )
def lowerCamelCase_ ( self : Tuple , UpperCAmelCase__ : Any=6 ):
'''simple docstring'''
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name , **kwargs )
# tokenizer_r.pad_token = None # Hotfixing padding = None
# Simple input
lowercase : Tuple ='''This is a simple input'''
lowercase : int =['''This is a simple input 1''', '''This is a simple input 2''']
lowercase : Optional[Any] =('''This is a simple input''', '''This is a pair''')
lowercase : int =[
('''This is a simple input 1''', '''This is a simple input 2'''),
('''This is a simple pair 1''', '''This is a simple pair 2'''),
]
# Simple input tests
try:
tokenizer_r.encode(UpperCAmelCase__ , max_length=UpperCAmelCase__ )
tokenizer_r.encode_plus(UpperCAmelCase__ , max_length=UpperCAmelCase__ )
tokenizer_r.batch_encode_plus(UpperCAmelCase__ , max_length=UpperCAmelCase__ )
tokenizer_r.encode(UpperCAmelCase__ , max_length=UpperCAmelCase__ )
tokenizer_r.batch_encode_plus(UpperCAmelCase__ , max_length=UpperCAmelCase__ )
except ValueError:
self.fail('''Bloom Tokenizer should be able to deal with padding''' )
                tokenizer_r.pad_token = None # Hotfixing padding = None
self.assertRaises(UpperCAmelCase__ , tokenizer_r.encode , UpperCAmelCase__ , max_length=UpperCAmelCase__ , padding='''max_length''' )
# Simple input
self.assertRaises(UpperCAmelCase__ , tokenizer_r.encode_plus , UpperCAmelCase__ , max_length=UpperCAmelCase__ , padding='''max_length''' )
# Simple input
self.assertRaises(
UpperCAmelCase__ , tokenizer_r.batch_encode_plus , UpperCAmelCase__ , max_length=UpperCAmelCase__ , padding='''max_length''' , )
# Pair input
self.assertRaises(UpperCAmelCase__ , tokenizer_r.encode , UpperCAmelCase__ , max_length=UpperCAmelCase__ , padding='''max_length''' )
# Pair input
self.assertRaises(UpperCAmelCase__ , tokenizer_r.encode_plus , UpperCAmelCase__ , max_length=UpperCAmelCase__ , padding='''max_length''' )
# Pair input
self.assertRaises(
UpperCAmelCase__ , tokenizer_r.batch_encode_plus , UpperCAmelCase__ , max_length=UpperCAmelCase__ , padding='''max_length''' , )
def lowerCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
        tokenizer = self.get_rust_tokenizer()
        ds = load_dataset('''xnli''' , '''all_languages''' , split='''test''' , streaming=True )
        sample_data = next(iter(ds ) )['''premise'''] # pick one sample
        input_text = list(sample_data.values() )
        output_tokens = list(map(tokenizer.encode , input_text ) )
        predicted_text = [tokenizer.decode(x , clean_up_tokenization_spaces=False ) for x in output_tokens]
        self.assertListEqual(predicted_text , input_text )
def lowerCamelCase_ ( self : List[Any] ):
'''simple docstring'''
        # The test has to be overridden because BLOOM uses ALiBi positional embeddings, which do not impose
        # any sequence length constraints. This test of the parent class will fail since it relies on the
        # maximum sequence length of the positional embeddings.
self.assertGreaterEqual(len(self.tokenizer_class.pretrained_vocab_files_map ) , 1 )
self.assertGreaterEqual(len(list(self.tokenizer_class.pretrained_vocab_files_map.values() )[0] ) , 1 )
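# A small illustration of why ALiBi sidesteps the length limit noted above:
# instead of learned position embeddings (which fix a maximum length), each
# attention head adds a static linear penalty -m * distance to its scores,
# defined for any sequence length. Single-head sketch with an illustrative
# slope value; future positions get bias 0 but are masked out anyway.
import torch
def alibi_bias(seq_len , slope=0.5 ):
    distance = torch.arange(seq_len )[None, :] - torch.arange(seq_len )[:, None]
    return slope * distance.clamp(max=0 ) # 0 on the diagonal, -slope per step back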
| 88
| 1
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
'configuration_canine': ['CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP', 'CanineConfig'],
'tokenization_canine': ['CanineTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_canine'] = [
'CANINE_PRETRAINED_MODEL_ARCHIVE_LIST',
'CanineForMultipleChoice',
'CanineForQuestionAnswering',
'CanineForSequenceClassification',
'CanineForTokenClassification',
'CanineLayer',
'CanineModel',
'CaninePreTrainedModel',
'load_tf_weights_in_canine',
]
if TYPE_CHECKING:
from .configuration_canine import CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP, CanineConfig
from .tokenization_canine import CanineTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_canine import (
CANINE_PRETRAINED_MODEL_ARCHIVE_LIST,
CanineForMultipleChoice,
CanineForQuestionAnswering,
CanineForSequenceClassification,
CanineForTokenClassification,
CanineLayer,
CanineModel,
CaninePreTrainedModel,
load_tf_weights_in_canine,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
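# A rough standalone sketch (not the transformers implementation) of what the
# _LazyModule indirection above buys: heavy submodules are imported only on
# first attribute access, so importing the package itself stays cheap.
import importlib
import types
class _LazyModuleSketch(types.ModuleType ):
    def __init__(self , name , import_structure ):
        super().__init__(name )
        # map every exported symbol to the submodule that defines it
        self._symbol_to_module = {
            symbol: module for module, symbols in import_structure.items() for symbol in symbols
        }
    def __getattr__(self , name ):
        if name not in self._symbol_to_module:
            raise AttributeError(F'''module {self.__name__!r} has no attribute {name!r}''' )
        submodule = importlib.import_module('.' + self._symbol_to_module[name] , self.__name__ )
        value = getattr(submodule , name )
        setattr(self , name , value ) # cache so __getattr__ is not hit again
        return value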
| 101
|
from pathlib import Path
from typing import List
from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import get_tests_dir, is_tool_test
from transformers.tools.agent_types import AGENT_TYPE_MAPPING, AgentAudio, AgentImage, AgentText
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
authorized_types = ['''text''', '''image''', '''audio''']
def create_inputs(input_types: List[str] ):
    inputs = []
    for input_type in input_types:
        if input_type == "text":
            inputs.append("""Text input""" )
        elif input_type == "image":
            inputs.append(
                Image.open(Path(get_tests_dir("""fixtures/tests_samples/COCO""" ) ) / """000000039769.png""" ).resize((512, 512) ) )
        elif input_type == "audio":
            inputs.append(torch.ones(3000 ) )
        elif isinstance(input_type , list ):
            inputs.append(create_inputs(input_type ) )
        else:
            raise ValueError(F'''Invalid type requested: {input_type}''' )
    return inputs
def output_types(outputs: List ):
    output_types = []
    for output in outputs:
        if isinstance(output , (str, AgentText) ):
            output_types.append("""text""" )
        elif isinstance(output , (Image.Image, AgentImage) ):
            output_types.append("""image""" )
        elif isinstance(output , (torch.Tensor, AgentAudio) ):
            output_types.append("""audio""" )
        else:
            raise ValueError(F'''Invalid output: {output}''' )
    return output_types
@is_tool_test
class snake_case :
'''simple docstring'''
def UpperCAmelCase ( self : List[Any] ) ->List[Any]:
'''simple docstring'''
self.assertTrue(hasattr(self.tool , """inputs""" ) )
self.assertTrue(hasattr(self.tool , """outputs""" ) )
UpperCAmelCase__ = self.tool.inputs
for _input in inputs:
if isinstance(_input , lowerCamelCase_ ):
for __input in _input:
self.assertTrue(__input in authorized_types )
else:
self.assertTrue(_input in authorized_types )
UpperCAmelCase__ = self.tool.outputs
for _output in outputs:
self.assertTrue(_output in authorized_types )
def UpperCAmelCase ( self : List[Any] ) ->Tuple:
'''simple docstring'''
UpperCAmelCase__ = create_inputs(self.tool.inputs )
UpperCAmelCase__ = self.tool(*lowerCamelCase_ )
# There is a single output
if len(self.tool.outputs ) == 1:
UpperCAmelCase__ = [outputs]
self.assertListEqual(output_types(lowerCamelCase_ ) , self.tool.outputs )
def UpperCAmelCase ( self : Tuple ) ->Any:
'''simple docstring'''
self.assertTrue(hasattr(self.tool , """description""" ) )
self.assertTrue(hasattr(self.tool , """default_checkpoint""" ) )
self.assertTrue(self.tool.description.startswith("""This is a tool that""" ) )
def UpperCAmelCase ( self : List[Any] ) ->str:
'''simple docstring'''
UpperCAmelCase__ = create_inputs(self.tool.inputs )
UpperCAmelCase__ = self.tool(*lowerCamelCase_ )
if not isinstance(lowerCamelCase_ , lowerCamelCase_ ):
UpperCAmelCase__ = [outputs]
self.assertEqual(len(lowerCamelCase_ ) , len(self.tool.outputs ) )
for output, output_type in zip(lowerCamelCase_ , self.tool.outputs ):
UpperCAmelCase__ = AGENT_TYPE_MAPPING[output_type]
self.assertTrue(isinstance(lowerCamelCase_ , lowerCamelCase_ ) )
def UpperCAmelCase ( self : List[str] ) ->str:
'''simple docstring'''
UpperCAmelCase__ = create_inputs(self.tool.inputs )
UpperCAmelCase__ = []
for _input, input_type in zip(lowerCamelCase_ , self.tool.inputs ):
if isinstance(lowerCamelCase_ , lowerCamelCase_ ):
_inputs.append([AGENT_TYPE_MAPPING[_input_type](_input ) for _input_type in input_type] )
else:
_inputs.append(AGENT_TYPE_MAPPING[input_type](_input ) )
# Should not raise an error
UpperCAmelCase__ = self.tool(*lowerCamelCase_ )
if not isinstance(lowerCamelCase_ , lowerCamelCase_ ):
UpperCAmelCase__ = [outputs]
self.assertEqual(len(lowerCamelCase_ ) , len(self.tool.outputs ) )
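# A hypothetical minimal tool satisfying the contract these tests exercise
# (typed `inputs`/`outputs`, a `default_checkpoint` attribute, and a
# description starting with "This is a tool that"). The name and checkpoint
# below are illustrative placeholders, not a real transformers tool.
class EchoTextTool:
    description = 'This is a tool that returns its text input unchanged.'
    inputs = ['text']
    outputs = ['text']
    default_checkpoint = 'placeholder/echo-text' # placeholder: the tests only check the attribute exists
    def __call__(self , text ):
        return text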
| 392
| 0
|
'''simple docstring'''
import os
import tempfile
import unittest
from transformers import DistilBertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
DistilBertModel,
)
class DistilBertModelTester( object ):
'''simple docstring'''
def __init__( self , a_ , a_=1_3 , a_=7 , a_=True , a_=True , a_=False , a_=True , a_=9_9 , a_=3_2 , a_=5 , a_=4 , a_=3_7 , a_="gelu" , a_=0.1 , a_=0.1 , a_=5_1_2 , a_=1_6 , a_=2 , a_=0.02 , a_=3 , a_=4 , a_=None , ) -> Any:
lowercase : List[str] = parent
lowercase : str = batch_size
lowercase : int = seq_length
lowercase : Any = is_training
lowercase : List[Any] = use_input_mask
lowercase : str = use_token_type_ids
lowercase : List[str] = use_labels
lowercase : Optional[Any] = vocab_size
lowercase : List[Any] = hidden_size
lowercase : List[Any] = num_hidden_layers
lowercase : str = num_attention_heads
lowercase : Union[str, Any] = intermediate_size
lowercase : Union[str, Any] = hidden_act
lowercase : Optional[Any] = hidden_dropout_prob
lowercase : Union[str, Any] = attention_probs_dropout_prob
lowercase : List[str] = max_position_embeddings
lowercase : Tuple = type_vocab_size
lowercase : Dict = type_sequence_label_size
lowercase : Optional[Any] = initializer_range
lowercase : int = num_labels
lowercase : int = num_choices
lowercase : Tuple = scope
def a__ ( self ) -> Dict:
lowercase : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowercase : Union[str, Any] = None
if self.use_input_mask:
lowercase : int = random_attention_mask([self.batch_size, self.seq_length] )
lowercase : str = None
lowercase : str = None
lowercase : Optional[int] = None
if self.use_labels:
lowercase : str = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowercase : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowercase : Union[str, Any] = ids_tensor([self.batch_size] , self.num_choices )
lowercase : Dict = self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def a__ ( self ) -> Any:
return DistilBertConfig(
vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , )
def a__ ( self , a_ , a_ , a_ , a_ , a_ , a_ ) -> List[Any]:
lowercase : Tuple = DistilBertModel(config=a_ )
model.to(a_ )
model.eval()
lowercase : Optional[Any] = model(a_ , a_ )
lowercase : List[Any] = model(a_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def a__ ( self , a_ , a_ , a_ , a_ , a_ , a_ ) -> str:
lowercase : List[Any] = DistilBertForMaskedLM(config=a_ )
model.to(a_ )
model.eval()
lowercase : Dict = model(a_ , attention_mask=a_ , labels=a_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def a__ ( self , a_ , a_ , a_ , a_ , a_ , a_ ) -> int:
lowercase : Optional[Any] = DistilBertForQuestionAnswering(config=a_ )
model.to(a_ )
model.eval()
lowercase : List[Any] = model(
a_ , attention_mask=a_ , start_positions=a_ , end_positions=a_ )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def a__ ( self , a_ , a_ , a_ , a_ , a_ , a_ ) -> List[str]:
lowercase : Union[str, Any] = self.num_labels
lowercase : Optional[int] = DistilBertForSequenceClassification(a_ )
model.to(a_ )
model.eval()
lowercase : Dict = model(a_ , attention_mask=a_ , labels=a_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def a__ ( self , a_ , a_ , a_ , a_ , a_ , a_ ) -> List[str]:
lowercase : List[Any] = self.num_labels
lowercase : Optional[Any] = DistilBertForTokenClassification(config=a_ )
model.to(a_ )
model.eval()
lowercase : Optional[Any] = model(a_ , attention_mask=a_ , labels=a_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def a__ ( self , a_ , a_ , a_ , a_ , a_ , a_ ) -> Optional[int]:
lowercase : Optional[int] = self.num_choices
lowercase : Tuple = DistilBertForMultipleChoice(config=a_ )
model.to(a_ )
model.eval()
lowercase : int = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowercase : Union[str, Any] = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowercase : Tuple = model(
a_ , attention_mask=a_ , labels=a_ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def a__ ( self ) -> Tuple:
lowercase : Any = self.prepare_config_and_inputs()
((lowercase) , (lowercase) , (lowercase) , (lowercase) , (lowercase) , (lowercase)) : int = config_and_inputs
lowercase : List[str] = {"input_ids": input_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_torch
class _UpperCamelCase ( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase):
'''simple docstring'''
    all_model_classes = (
(
DistilBertModel,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
)
if is_torch_available()
        else ()
)
    pipeline_model_mapping = (
{
'''feature-extraction''': DistilBertModel,
'''fill-mask''': DistilBertForMaskedLM,
'''question-answering''': DistilBertForQuestionAnswering,
'''text-classification''': DistilBertForSequenceClassification,
'''token-classification''': DistilBertForTokenClassification,
'''zero-shot''': DistilBertForSequenceClassification,
}
if is_torch_available()
else {}
)
    fx_compatible = True
    test_pruning = True
    test_resize_embeddings = True
    test_resize_position_embeddings = True
def a__ ( self ) -> int:
        self.model_tester = DistilBertModelTester(self )
        self.config_tester = ConfigTester(self , config_class=DistilBertConfig , dim=3_7 )
def a__ ( self ) -> List[Any]:
self.config_tester.run_common_tests()
def a__ ( self ) -> List[Any]:
lowercase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_model(*a_ )
def a__ ( self ) -> Tuple:
lowercase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_masked_lm(*a_ )
def a__ ( self ) -> Union[str, Any]:
lowercase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_question_answering(*a_ )
def a__ ( self ) -> Optional[Any]:
lowercase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_sequence_classification(*a_ )
def a__ ( self ) -> Optional[Any]:
lowercase : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_token_classification(*a_ )
def a__ ( self ) -> List[str]:
lowercase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_multiple_choice(*a_ )
@slow
def a__ ( self ) -> List[Any]:
for model_name in DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase : Union[str, Any] = DistilBertModel.from_pretrained(a_ )
self.assertIsNotNone(a_ )
@slow
@require_torch_gpu
def a__ ( self ) -> Tuple:
lowercase , lowercase : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
            # DistilBertForMultipleChoice behaves incorrectly in JIT environments.
if model_class == DistilBertForMultipleChoice:
return
lowercase : Any = True
lowercase : List[str] = model_class(config=a_ )
lowercase : Optional[int] = self._prepare_for_class(a_ , a_ )
lowercase : Union[str, Any] = torch.jit.trace(
a_ , (inputs_dict["input_ids"].to("cpu" ), inputs_dict["attention_mask"].to("cpu" )) )
with tempfile.TemporaryDirectory() as tmp:
torch.jit.save(a_ , os.path.join(a_ , "traced_model.pt" ) )
lowercase : str = torch.jit.load(os.path.join(a_ , "traced_model.pt" ) , map_location=a_ )
loaded(inputs_dict["input_ids"].to(a_ ) , inputs_dict["attention_mask"].to(a_ ) )
@require_torch
class _UpperCamelCase ( unittest.TestCase):
'''simple docstring'''
@slow
def a__ ( self ) -> List[str]:
lowercase : Dict = DistilBertModel.from_pretrained("distilbert-base-uncased" )
lowercase : Union[str, Any] = torch.tensor([[0, 3_4_5, 2_3_2, 3_2_8, 7_4_0, 1_4_0, 1_6_9_5, 6_9, 6_0_7_8, 1_5_8_8, 2]] )
lowercase : str = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
with torch.no_grad():
lowercase : Union[str, Any] = model(a_ , attention_mask=a_ )[0]
lowercase : List[str] = torch.Size((1, 1_1, 7_6_8) )
self.assertEqual(output.shape , a_ )
lowercase : Optional[int] = torch.tensor(
[[[-0.16_39, 0.32_99, 0.16_48], [-0.17_46, 0.32_89, 0.17_10], [-0.18_84, 0.33_57, 0.18_10]]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , a_ , atol=1e-4 ) )
| 425
|
'''simple docstring'''
def is_palindrome(n ) -> bool:
    return str(n ) == str(n )[::-1]
def sum_reverse(n ) -> int:
    return int(n ) + int(str(n )[::-1] )
def solution(limit = 1_0_0_0_0 ) -> int:
    lychrel_nums = []
    for num in range(1 , limit ):
        iterations = 0
        a = num
        while iterations < 5_0:
            a = sum_reverse(a )
            iterations += 1
            if is_palindrome(a ):
                break
        else:
            lychrel_nums.append(num )
    return len(lychrel_nums )
if __name__ == "__main__":
print(F'''{solution() = }''')
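    # Worked example of the reverse-and-add loop above: 47 settles in one step,
    # 47 + 74 = 121, a palindrome, while 196 is the classic candidate that never
    # reaches a palindrome within the 50-iteration budget and is counted.
    assert sum_reverse(47 ) == 121 and is_palindrome(121 )
    assert solution(197 ) - solution(196 ) == 1 # only 196 differs between the two ranges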
| 425
| 1
|
import json
import os
import unittest
from transformers.models.ctrl.tokenization_ctrl import VOCAB_FILES_NAMES, CTRLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class __lowercase (TokenizerTesterMixin , unittest.TestCase ):
    tokenizer_class = CTRLTokenizer
    test_rust_tokenizer = False
    test_seq2seq = False
def UpperCamelCase__ ( self ) ->Tuple:
'''simple docstring'''
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = ['''adapt''', '''re@@''', '''a@@''', '''apt''', '''c@@''', '''t''', '''<unk>''']
        vocab_tokens = dict(zip(vocab , range(len(vocab ) ) ) )
        merges = ['''#version: 0.2''', '''a p''', '''ap t</w>''', '''r e''', '''a d''', '''ad apt</w>''', '''''']
        self.special_tokens_map = {'''unk_token''': '''<unk>'''}
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
        self.merges_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
        with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
            fp.write(json.dumps(vocab_tokens ) + '''\n''' )
        with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
            fp.write('''\n'''.join(merges ) )
def UpperCamelCase__ ( self , **A_ ) ->str:
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return CTRLTokenizer.from_pretrained(self.tmpdirname , **__snake_case )
def UpperCamelCase__ ( self , A_ ) ->Dict:
'''simple docstring'''
        input_text = '''adapt react readapt apt'''
        output_text = '''adapt react readapt apt'''
return input_text, output_text
def UpperCamelCase__ ( self ) ->List[str]:
'''simple docstring'''
        tokenizer = CTRLTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
        text = '''adapt react readapt apt'''
        bpe_tokens = '''adapt re@@ a@@ c@@ t re@@ adapt apt'''.split()
        tokens = tokenizer.tokenize(text )
        self.assertListEqual(tokens , bpe_tokens )
        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [0, 1, 2, 4, 5, 1, 0, 3, 6]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens ) , input_bpe_tokens )
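# Hand-walk of the fixture merges above (a sketch of BPE merge priority, not
# the tokenizer internals): "react" starts as r e a c t</w>; only the merge
# "r e" applies, leaving re a c t</w>, which surfaces as re@@ a@@ c@@ t.
# "adapt" merges all the way down the priority list:
#   a d a p t</w> -> a d ap t</w> -> a d apt</w> -> ad apt</w> -> adapt</w>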
| 492
|
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, MBartaaTokenizer, MBartaaTokenizerFast, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
)
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
if is_torch_available():
from transformers.models.mbart.modeling_mbart import shift_tokens_right
EN_CODE = 25_00_04
RO_CODE = 25_00_20
@require_sentencepiece
@require_tokenizers
class A__ ( TokenizerTesterMixin , unittest.TestCase ):
"""simple docstring"""
    tokenizer_class = MBartaaTokenizer
    rust_tokenizer_class = MBartaaTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
def a_ ( self ):
super().setUp()
# We have a SentencePiece fixture for testing
        tokenizer = MBartaaTokenizer(SAMPLE_VOCAB , src_lang='''en_XX''' , tgt_lang='''ro_RO''' , keep_accents=True )
tokenizer.save_pretrained(self.tmpdirname )
def a_ ( self ):
snake_case = '''<s>'''
snake_case = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(__snake_case ) , __snake_case )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(__snake_case ) , __snake_case )
def a_ ( self ):
snake_case = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '''<s>''' )
self.assertEqual(vocab_keys[1] , '''<pad>''' )
self.assertEqual(vocab_keys[-1] , '''<mask>''' )
self.assertEqual(len(__snake_case ) , 1_0_5_4 )
def a_ ( self ):
self.assertEqual(self.get_tokenizer().vocab_size , 1_0_5_4 )
def a_ ( self ):
        tokenizer = MBartaaTokenizer(SAMPLE_VOCAB , src_lang='''en_XX''' , tgt_lang='''ro_RO''' , keep_accents=True )
snake_case = tokenizer.tokenize('''This is a test''' )
self.assertListEqual(__snake_case , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(__snake_case ) , [value + tokenizer.fairseq_offset for value in [2_8_5, 4_6, 1_0, 1_7_0, 3_8_2]] , )
snake_case = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
self.assertListEqual(
__snake_case , [SPIECE_UNDERLINE + '''I''', SPIECE_UNDERLINE + '''was''', SPIECE_UNDERLINE + '''b''', '''or''', '''n''', SPIECE_UNDERLINE + '''in''', SPIECE_UNDERLINE + '''''', '''9''', '''2''', '''0''', '''0''', '''0''', ''',''', SPIECE_UNDERLINE + '''and''', SPIECE_UNDERLINE + '''this''', SPIECE_UNDERLINE + '''is''', SPIECE_UNDERLINE + '''f''', '''al''', '''s''', '''é''', '''.'''] , )
snake_case = tokenizer.convert_tokens_to_ids(__snake_case )
self.assertListEqual(
__snake_case , [
value + tokenizer.fairseq_offset
for value in [8, 2_1, 8_4, 5_5, 2_4, 1_9, 7, 2, 6_0_2, 3_4_7, 3_4_7, 3_4_7, 3, 1_2, 6_6, 4_6, 7_2, 8_0, 6, 2, 4]
] , )
snake_case = tokenizer.convert_ids_to_tokens(__snake_case )
self.assertListEqual(
__snake_case , [SPIECE_UNDERLINE + '''I''', SPIECE_UNDERLINE + '''was''', SPIECE_UNDERLINE + '''b''', '''or''', '''n''', SPIECE_UNDERLINE + '''in''', SPIECE_UNDERLINE + '''''', '''<unk>''', '''2''', '''0''', '''0''', '''0''', ''',''', SPIECE_UNDERLINE + '''and''', SPIECE_UNDERLINE + '''this''', SPIECE_UNDERLINE + '''is''', SPIECE_UNDERLINE + '''f''', '''al''', '''s''', '''<unk>''', '''.'''] , )
@slow
def a_ ( self ):
# fmt: off
snake_case = {'''input_ids''': [[2_5_0_0_0_4, 1_1_0_6_2, 8_2_7_7_2, 7, 1_5, 8_2_7_7_2, 5_3_8, 5_1_5_2_9, 2_3_7, 1_7_1_9_8, 1_2_9_0, 2_0_6, 9, 2_1_5_1_7_5, 1_3_1_4, 1_3_6, 1_7_1_9_8, 1_2_9_0, 2_0_6, 9, 5_6_3_5_9, 4_2, 1_2_2_0_0_9, 9, 1_6_4_6_6, 1_6, 8_7_3_4_4, 4_5_3_7, 9, 4_7_1_7, 7_8_3_8_1, 6, 1_5_9_9_5_8, 7, 1_5, 2_4_4_8_0, 6_1_8, 4, 5_2_7, 2_2_6_9_3, 5_4_2_8, 4, 2_7_7_7, 2_4_4_8_0, 9_8_7_4, 4, 4_3_5_2_3, 5_9_4, 4, 8_0_3, 1_8_3_9_2, 3_3_1_8_9, 1_8, 4, 4_3_5_2_3, 2_4_4_4_7, 1_2_3_9_9, 1_0_0, 2_4_9_5_5, 8_3_6_5_8, 9_6_2_6, 1_4_4_0_5_7, 1_5, 8_3_9, 2_2_3_3_5, 1_6, 1_3_6, 2_4_9_5_5, 8_3_6_5_8, 8_3_4_7_9, 1_5, 3_9_1_0_2, 7_2_4, 1_6, 6_7_8, 6_4_5, 2_7_8_9, 1_3_2_8, 4_5_8_9, 4_2, 1_2_2_0_0_9, 1_1_5_7_7_4, 2_3, 8_0_5, 1_3_2_8, 4_6_8_7_6, 7, 1_3_6, 5_3_8_9_4, 1_9_4_0, 4_2_2_2_7, 4_1_1_5_9, 1_7_7_2_1, 8_2_3, 4_2_5, 4, 2_7_5_1_2, 9_8_7_2_2, 2_0_6, 1_3_6, 5_5_3_1, 4_9_7_0, 9_1_9, 1_7_3_3_6, 5, 2], [2_5_0_0_0_4, 2_0_0_8_0, 6_1_8, 8_3, 8_2_7_7_5, 4_7, 4_7_9, 9, 1_5_1_7, 7_3, 5_3_8_9_4, 3_3_3, 8_0_5_8_1, 1_1_0_1_1_7, 1_8_8_1_1, 5_2_5_6, 1_2_9_5, 5_1, 1_5_2_5_2_6, 2_9_7, 7_9_8_6, 3_9_0, 1_2_4_4_1_6, 5_3_8, 3_5_4_3_1, 2_1_4, 9_8, 1_5_0_4_4, 2_5_7_3_7, 1_3_6, 7_1_0_8, 4_3_7_0_1, 2_3, 7_5_6, 1_3_5_3_5_5, 7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [2_5_0_0_0_4, 5_8_1, 6_3_7_7_3, 1_1_9_4_5_5, 6, 1_4_7_7_9_7, 8_8_2_0_3, 7, 6_4_5, 7_0, 2_1, 3_2_8_5, 1_0_2_6_9, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=__snake_case , model_name='''facebook/mbart-large-50''' , revision='''d3913889c59cd5c9e456b269c376325eabad57e2''' , )
def a_ ( self ):
if not self.test_slow_tokenizer:
# as we don't have a slow version, we can't compare the outputs between slow and fast versions
return
snake_case = (self.rust_tokenizer_class, '''hf-internal-testing/tiny-random-mbart50''', {})
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
            tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name , **kwargs )
            tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name , **kwargs )
            tmpdirname2 = tempfile.mkdtemp()
            tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2 )
            tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2 )
            # Checks it saves with the same files + the tokenizer.json file for the fast one
self.assertTrue(any('''tokenizer.json''' in f for f in tokenizer_r_files ) )
snake_case = tuple(f for f in tokenizer_r_files if '''tokenizer.json''' not in f )
self.assertSequenceEqual(__snake_case , __snake_case )
# Checks everything loads correctly in the same way
snake_case = tokenizer_r.from_pretrained(__snake_case )
snake_case = tokenizer_p.from_pretrained(__snake_case )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(__snake_case , __snake_case ) )
# self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
# self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))
shutil.rmtree(__snake_case )
# Save tokenizer rust, legacy_format=True
snake_case = tempfile.mkdtemp()
snake_case = tokenizer_r.save_pretrained(__snake_case , legacy_format=__snake_case )
snake_case = tokenizer_p.save_pretrained(__snake_case )
            # Checks it saves with the same files
self.assertSequenceEqual(__snake_case , __snake_case )
# Checks everything loads correctly in the same way
snake_case = tokenizer_r.from_pretrained(__snake_case )
snake_case = tokenizer_p.from_pretrained(__snake_case )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(__snake_case , __snake_case ) )
shutil.rmtree(__snake_case )
# Save tokenizer rust, legacy_format=False
snake_case = tempfile.mkdtemp()
snake_case = tokenizer_r.save_pretrained(__snake_case , legacy_format=__snake_case )
snake_case = tokenizer_p.save_pretrained(__snake_case )
# Checks it saved the tokenizer.json file
self.assertTrue(any('''tokenizer.json''' in f for f in tokenizer_r_files ) )
# Checks everything loads correctly in the same way
snake_case = tokenizer_r.from_pretrained(__snake_case )
snake_case = tokenizer_p.from_pretrained(__snake_case )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(__snake_case , __snake_case ) )
shutil.rmtree(__snake_case )
@require_torch
@require_sentencepiece
@require_tokenizers
class A__ ( unittest.TestCase ):
"""simple docstring"""
    checkpoint_name = 'facebook/mbart-large-50-one-to-many-mmt'
    src_text = [
' UN Chief Says There Is No Military Solution in Syria',
' Secretary-General Ban Ki-moon says his response to Russia\'s stepped up military support for Syria is that "there is no military solution" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.',
]
    tgt_text = [
'Şeful ONU declară că nu există o soluţie militară în Siria',
'Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei'
' pentru Siria este că "nu există o soluţie militară" la conflictul de aproape cinci ani şi că noi arme nu vor'
' face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.',
]
    expected_src_tokens = [EN_CODE, 82_74, 12_78_73, 2_59_16, 7, 86_22, 20_71, 4_38, 6_74_85, 53, 18_78_95, 23, 5_17_12, 2]
@classmethod
def a_ ( cls ):
snake_case = MBartaaTokenizer.from_pretrained(
cls.checkpoint_name , src_lang='''en_XX''' , tgt_lang='''ro_RO''' )
snake_case = 1
return cls
def a_ ( self ):
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''ar_AR'''] , 2_5_0_0_0_1 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''en_EN'''] , 2_5_0_0_0_4 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''ro_RO'''] , 2_5_0_0_2_0 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''mr_IN'''] , 2_5_0_0_3_8 )
def a_ ( self ):
snake_case = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
self.assertListEqual(self.expected_src_tokens , __snake_case )
def a_ ( self ):
self.assertIn(__snake_case , self.tokenizer.all_special_ids )
snake_case = [RO_CODE, 8_8_4, 9_0_1_9, 9_6, 9, 9_1_6, 8_6_7_9_2, 3_6, 1_8_7_4_3, 1_5_5_9_6, 5, 2]
snake_case = self.tokenizer.decode(__snake_case , skip_special_tokens=__snake_case )
snake_case = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=__snake_case )
self.assertEqual(__snake_case , __snake_case )
self.assertNotIn(self.tokenizer.eos_token , __snake_case )
def a_ ( self ):
snake_case = ['''this is gunna be a long sentence ''' * 2_0]
assert isinstance(src_text[0] , __snake_case )
snake_case = 1_0
snake_case = self.tokenizer(__snake_case , max_length=__snake_case , truncation=__snake_case ).input_ids[0]
self.assertEqual(ids[0] , __snake_case )
self.assertEqual(ids[-1] , 2 )
self.assertEqual(len(__snake_case ) , __snake_case )
def a_ ( self ):
self.assertListEqual(self.tokenizer.convert_tokens_to_ids(['''<mask>''', '''ar_AR'''] ) , [2_5_0_0_5_3, 2_5_0_0_0_1] )
def a_ ( self ):
snake_case = tempfile.mkdtemp()
snake_case = self.tokenizer.fairseq_tokens_to_ids
self.tokenizer.save_pretrained(__snake_case )
snake_case = MBartaaTokenizer.from_pretrained(__snake_case )
self.assertDictEqual(new_tok.fairseq_tokens_to_ids , __snake_case )
@require_torch
def a_ ( self ):
snake_case = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=__snake_case , return_tensors='''pt''' )
snake_case = shift_tokens_right(batch['''labels'''] , self.tokenizer.pad_token_id )
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
assert batch.input_ids[1][0] == EN_CODE
assert batch.input_ids[1][-1] == 2
assert batch.labels[1][0] == RO_CODE
assert batch.labels[1][-1] == 2
assert batch.decoder_input_ids[1][:2].tolist() == [2, RO_CODE]
@require_torch
def a_ ( self ):
snake_case = self.tokenizer(
self.src_text , text_target=self.tgt_text , padding=__snake_case , truncation=__snake_case , max_length=len(self.expected_src_tokens ) , return_tensors='''pt''' , )
snake_case = shift_tokens_right(batch['''labels'''] , self.tokenizer.pad_token_id )
self.assertIsInstance(__snake_case , __snake_case )
self.assertEqual((2, 1_4) , batch.input_ids.shape )
self.assertEqual((2, 1_4) , batch.attention_mask.shape )
snake_case = batch.input_ids.tolist()[0]
self.assertListEqual(self.expected_src_tokens , __snake_case )
self.assertEqual(2 , batch.decoder_input_ids[0, 0] ) # decoder_start_token_id
# Test that special tokens are reset
self.assertEqual(self.tokenizer.prefix_tokens , [EN_CODE] )
self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
def a_ ( self ):
snake_case = self.tokenizer(self.src_text , padding=__snake_case , truncation=__snake_case , max_length=3 , return_tensors='''pt''' )
snake_case = self.tokenizer(
text_target=self.tgt_text , padding=__snake_case , truncation=__snake_case , max_length=1_0 , return_tensors='''pt''' )
snake_case = targets['''input_ids''']
snake_case = shift_tokens_right(__snake_case , self.tokenizer.pad_token_id )
self.assertEqual(batch.input_ids.shape[1] , 3 )
self.assertEqual(batch.decoder_input_ids.shape[1] , 1_0 )
@require_torch
def a_ ( self ):
snake_case = self.tokenizer._build_translation_inputs(
'''A test''' , return_tensors='''pt''' , src_lang='''en_XX''' , tgt_lang='''ar_AR''' )
self.assertEqual(
nested_simplify(__snake_case ) , {
# en_XX, A, test, EOS
'''input_ids''': [[2_5_0_0_0_4, 6_2, 3_0_3_4, 2]],
'''attention_mask''': [[1, 1, 1, 1]],
# ar_AR
'''forced_bos_token_id''': 2_5_0_0_0_1,
} , )
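# A simplified sketch of the `shift_tokens_right` behaviour these tests rely on
# for mBART-style decoders: decoder inputs are the labels rotated one step so
# each row starts from its final token (the EOS position). The real transformers
# implementation additionally handles padding by moving the last non-pad token
# of each row to the front.
import torch
def shift_right_simple(labels ):
    return torch.roll(labels , shifts=1 , dims=-1 )
print(shift_right_simple(torch.tensor([[8_8_4, 9_0_1_9, 9_6, 2]] ) ) ) # tensor([[   2,  884, 9019,   96]])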
| 550
| 0
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
'configuration_bloom': ['BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP', 'BloomConfig', 'BloomOnnxConfig'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_bloom_fast'] = ['BloomTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_bloom'] = [
'BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST',
'BloomForCausalLM',
'BloomModel',
'BloomPreTrainedModel',
'BloomForSequenceClassification',
'BloomForTokenClassification',
'BloomForQuestionAnswering',
]
if TYPE_CHECKING:
from .configuration_bloom import BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP, BloomConfig, BloomOnnxConfig
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bloom_fast import BloomTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bloom import (
BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST,
BloomForCausalLM,
BloomForQuestionAnswering,
BloomForSequenceClassification,
BloomForTokenClassification,
BloomModel,
BloomPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 153
|
from typing import Dict, Iterable, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
__SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
class lowerCAmelCase_ ( BaseImageProcessor ):
'''simple docstring'''
_lowercase = ['pixel_values']
def __init__( self , __UpperCAmelCase = True , __UpperCAmelCase = None , __UpperCAmelCase = PILImageResampling.BICUBIC , __UpperCAmelCase = True , __UpperCAmelCase = None , __UpperCAmelCase = True , __UpperCAmelCase = 1 / 255 , __UpperCAmelCase = True , __UpperCAmelCase = IMAGENET_DEFAULT_MEAN , __UpperCAmelCase = IMAGENET_DEFAULT_STD , **__UpperCAmelCase , ):
super().__init__(**__UpperCAmelCase )
SCREAMING_SNAKE_CASE_ : Tuple =size if size is not None else {'shortest_edge': 224}
SCREAMING_SNAKE_CASE_ : List[Any] =get_size_dict(__UpperCAmelCase , default_to_square=__UpperCAmelCase )
SCREAMING_SNAKE_CASE_ : List[str] =crop_size if crop_size is not None else {'height': 224, 'width': 224}
SCREAMING_SNAKE_CASE_ : Union[str, Any] =get_size_dict(__UpperCAmelCase , param_name='crop_size' )
SCREAMING_SNAKE_CASE_ : Tuple =do_resize
SCREAMING_SNAKE_CASE_ : Dict =size
SCREAMING_SNAKE_CASE_ : Tuple =resample
SCREAMING_SNAKE_CASE_ : List[str] =do_center_crop
SCREAMING_SNAKE_CASE_ : Optional[int] =crop_size
SCREAMING_SNAKE_CASE_ : int =do_rescale
SCREAMING_SNAKE_CASE_ : List[Any] =rescale_factor
SCREAMING_SNAKE_CASE_ : Any =do_normalize
SCREAMING_SNAKE_CASE_ : Tuple =image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
SCREAMING_SNAKE_CASE_ : Tuple =image_std if image_std is not None else IMAGENET_DEFAULT_STD
def __lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = PILImageResampling.BICUBIC , __UpperCAmelCase = None , **__UpperCAmelCase , ):
SCREAMING_SNAKE_CASE_ : Optional[Any] =get_size_dict(__UpperCAmelCase , default_to_square=__UpperCAmelCase )
# size_dict is a dict with either keys "height" and "width" or "shortest_edge"
if "shortest_edge" in size:
SCREAMING_SNAKE_CASE_ : List[str] =int((256 / 224) * size['shortest_edge'] )
SCREAMING_SNAKE_CASE_ : Optional[Any] =get_resize_output_image_size(__UpperCAmelCase , size=__UpperCAmelCase , default_to_square=__UpperCAmelCase )
SCREAMING_SNAKE_CASE_ : Tuple ={'height': output_size[0], 'width': output_size[1]}
if "height" not in size_dict or "width" not in size_dict:
raise ValueError(
F"""Size dict must have keys 'height' and 'width' or 'shortest_edge'. Got {size_dict.keys()}""" )
return resize(
__UpperCAmelCase , size=(size_dict['height'], size_dict['width']) , resample=__UpperCAmelCase , data_format=__UpperCAmelCase , **__UpperCAmelCase )
def __lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = None , **__UpperCAmelCase , ):
SCREAMING_SNAKE_CASE_ : List[Any] =get_size_dict(__UpperCAmelCase )
if "height" not in size or "width" not in size:
raise ValueError(F"""Size dict must have keys 'height' and 'width'. Got {size.keys()}""" )
return center_crop(__UpperCAmelCase , size=(size['height'], size['width']) , data_format=__UpperCAmelCase , **__UpperCAmelCase )
def __lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = None , **__UpperCAmelCase , ):
return rescale(__UpperCAmelCase , scale=__UpperCAmelCase , data_format=__UpperCAmelCase , **__UpperCAmelCase )
def __lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = None , **__UpperCAmelCase , ):
return normalize(__UpperCAmelCase , mean=__UpperCAmelCase , std=__UpperCAmelCase , data_format=__UpperCAmelCase , **__UpperCAmelCase )
def __lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = ChannelDimension.FIRST , **__UpperCAmelCase , ):
SCREAMING_SNAKE_CASE_ : Optional[int] =do_resize if do_resize is not None else self.do_resize
SCREAMING_SNAKE_CASE_ : List[str] =resample if resample is not None else self.resample
SCREAMING_SNAKE_CASE_ : Tuple =do_center_crop if do_center_crop is not None else self.do_center_crop
SCREAMING_SNAKE_CASE_ : Union[str, Any] =do_rescale if do_rescale is not None else self.do_rescale
SCREAMING_SNAKE_CASE_ : Tuple =rescale_factor if rescale_factor is not None else self.rescale_factor
SCREAMING_SNAKE_CASE_ : Tuple =do_normalize if do_normalize is not None else self.do_normalize
SCREAMING_SNAKE_CASE_ : int =image_mean if image_mean is not None else self.image_mean
SCREAMING_SNAKE_CASE_ : List[Any] =image_std if image_std is not None else self.image_std
SCREAMING_SNAKE_CASE_ : List[str] =size if size is not None else self.size
SCREAMING_SNAKE_CASE_ : Any =get_size_dict(__UpperCAmelCase , default_to_square=__UpperCAmelCase )
SCREAMING_SNAKE_CASE_ : Optional[Any] =crop_size if crop_size is not None else self.crop_size
SCREAMING_SNAKE_CASE_ : Optional[Any] =get_size_dict(__UpperCAmelCase , param_name='crop_size' )
SCREAMING_SNAKE_CASE_ : Optional[Any] =make_list_of_images(__UpperCAmelCase )
if not valid_images(__UpperCAmelCase ):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.' )
if do_resize and size is None:
raise ValueError('Size must be specified if do_resize is True.' )
if do_center_crop and crop_size is None:
raise ValueError('Crop size must be specified if do_center_crop is True.' )
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('Image mean and std must be specified if do_normalize is True.' )
# All transformations expect numpy arrays.
SCREAMING_SNAKE_CASE_ : Any =[to_numpy_array(__UpperCAmelCase ) for image in images]
if do_resize:
SCREAMING_SNAKE_CASE_ : Dict =[self.resize(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) for image in images]
if do_center_crop:
SCREAMING_SNAKE_CASE_ : Any =[self.center_crop(__UpperCAmelCase , __UpperCAmelCase ) for image in images]
if do_rescale:
SCREAMING_SNAKE_CASE_ : List[Any] =[self.rescale(__UpperCAmelCase , __UpperCAmelCase ) for image in images]
if do_normalize:
SCREAMING_SNAKE_CASE_ : List[str] =[self.normalize(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) for image in images]
SCREAMING_SNAKE_CASE_ : Tuple =[to_channel_dimension_format(__UpperCAmelCase , __UpperCAmelCase ) for image in images]
SCREAMING_SNAKE_CASE_ : Tuple ={'pixel_values': images}
return BatchFeature(data=__UpperCAmelCase , tensor_type=__UpperCAmelCase )
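# Worked example of the shortest-edge rule in `resize` above: with the default
# {'shortest_edge': 224}, images are first resized so the short side becomes
# int((256 / 224) * 224) = 256 and then center-cropped to 224x224 -- the classic
# "resize to 256, crop to 224" ImageNet evaluation recipe.
assert int((256 / 224) * 224 ) == 256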
| 153
| 1
|
import warnings
from typing import Any, Dict, List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, optimal_fft_length, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
lowercase_ = logging.get_logger(__name__)
class __UpperCamelCase ( SequenceFeatureExtractor ):
"""simple docstring"""
lowerCAmelCase_ = ['''input_values''', '''attention_mask''']
def __init__( self : Tuple , _A : int = 1 , _A : int = 1_6000 , _A : float = 0.0 , _A : bool = False , _A : int = 80 , _A : int = 16 , _A : int = 64 , _A : str = "hann_window" , _A : float = 1.0 , _A : float = 80 , _A : float = 7600 , _A : float = 1e-10 , _A : int = 2 , _A : bool = True , **_A : Optional[Any] , ):
"""simple docstring"""
super().__init__(feature_size=_A , sampling_rate=_A , padding_value=_A , **_A )
__SCREAMING_SNAKE_CASE : List[Any] = do_normalize
__SCREAMING_SNAKE_CASE : Optional[Any] = return_attention_mask
__SCREAMING_SNAKE_CASE : Optional[Any] = num_mel_bins
__SCREAMING_SNAKE_CASE : Dict = hop_length
__SCREAMING_SNAKE_CASE : Any = win_length
__SCREAMING_SNAKE_CASE : Union[str, Any] = win_function
__SCREAMING_SNAKE_CASE : str = frame_signal_scale
__SCREAMING_SNAKE_CASE : Tuple = fmin
__SCREAMING_SNAKE_CASE : Any = fmax
__SCREAMING_SNAKE_CASE : Dict = mel_floor
__SCREAMING_SNAKE_CASE : Union[str, Any] = reduction_factor
__SCREAMING_SNAKE_CASE : List[str] = win_length * sampling_rate // 1000
__SCREAMING_SNAKE_CASE : List[Any] = hop_length * sampling_rate // 1000
__SCREAMING_SNAKE_CASE : Union[str, Any] = optimal_fft_length(self.sample_size )
__SCREAMING_SNAKE_CASE : str = (self.n_fft // 2) + 1
__SCREAMING_SNAKE_CASE : Optional[int] = window_function(window_length=self.sample_size , name=self.win_function , periodic=_A )
__SCREAMING_SNAKE_CASE : Optional[int] = mel_filter_bank(
num_frequency_bins=self.n_freqs , num_mel_filters=self.num_mel_bins , min_frequency=self.fmin , max_frequency=self.fmax , sampling_rate=self.sampling_rate , norm='''slaney''' , mel_scale='''slaney''' , )
if frame_signal_scale != 1.0:
warnings.warn(
'''The argument `frame_signal_scale` is deprecated and will be removed in version 4.30.0 of Transformers''' , _A , )
if reduction_factor != 2.0:
warnings.warn(
'''The argument `reduction_factor` is deprecated and will be removed in version 4.30.0 of Transformers''' , _A , )
@staticmethod
# Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm
def UpperCAmelCase__ ( _A : List[np.ndarray] , _A : List[np.ndarray] , _A : float = 0.0 ):
"""simple docstring"""
if attention_mask is not None:
__SCREAMING_SNAKE_CASE : Optional[int] = np.array(_A , np.intaa )
__SCREAMING_SNAKE_CASE : List[Any] = []
for vector, length in zip(_A , attention_mask.sum(-1 ) ):
__SCREAMING_SNAKE_CASE : Tuple = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1e-7 )
if length < normed_slice.shape[0]:
__SCREAMING_SNAKE_CASE : Any = padding_value
normed_input_values.append(_A )
else:
__SCREAMING_SNAKE_CASE : int = [(x - x.mean()) / np.sqrt(x.var() + 1e-7 ) for x in input_values]
return normed_input_values
def UpperCAmelCase__ ( self : Any , _A : np.ndarray , ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : str = spectrogram(
_A , window=self.window , frame_length=self.sample_size , hop_length=self.sample_stride , fft_length=self.n_fft , mel_filters=self.mel_filters , mel_floor=self.mel_floor , log_mel='''log10''' , )
return log_mel_spec.T
def __call__( self : Dict , _A : Optional[Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]]] = None , _A : Optional[Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]]] = None , _A : Union[bool, str, PaddingStrategy] = False , _A : Optional[int] = None , _A : bool = False , _A : Optional[int] = None , _A : Optional[bool] = None , _A : Optional[Union[str, TensorType]] = None , _A : Optional[int] = None , **_A : str , ):
"""simple docstring"""
if audio is None and audio_target is None:
raise ValueError('''You must provide either `audio` or `audio_target` values.''' )
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
F'''The model corresponding to this feature extractor: {self} was trained using a sampling rate of'''
F''' {self.sampling_rate}. Please make sure that the provided audio input was sampled with'''
F''' {self.sampling_rate} and not {sampling_rate}.''' )
else:
logger.warning(
'''It is strongly recommended to pass the ``sampling_rate`` argument to this function. '''
'''Failing to do so can result in silent errors that might be hard to debug.''' )
if audio is not None:
            inputs = self._process_audio(
_A , _A , _A , _A , _A , _A , _A , _A , **_A , )
else:
            inputs = None
if audio_target is not None:
            inputs_target = self._process_audio(
_A , _A , _A , _A , _A , _A , _A , _A , **_A , )
if inputs is None:
return inputs_target
else:
            inputs['''labels'''] = inputs_target['''input_values''']
            decoder_attention_mask = inputs_target.get('''attention_mask''' )
            if decoder_attention_mask is not None:
                inputs['''decoder_attention_mask'''] = decoder_attention_mask
return inputs
def UpperCAmelCase__ ( self : Tuple , _A : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , _A : bool = False , _A : Union[bool, str, PaddingStrategy] = False , _A : Optional[int] = None , _A : bool = False , _A : Optional[int] = None , _A : Optional[bool] = None , _A : Optional[Union[str, TensorType]] = None , **_A : str , ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : List[str] = isinstance(_A , np.ndarray ) and len(speech.shape ) > 1
if is_batched_numpy and len(speech.shape ) > 2:
raise ValueError(F'''Only mono-channel audio is supported for input to {self}''' )
__SCREAMING_SNAKE_CASE : int = is_batched_numpy or (
isinstance(_A , (list, tuple) ) and (isinstance(speech[0] , (np.ndarray, tuple, list) ))
)
if is_batched:
__SCREAMING_SNAKE_CASE : Tuple = [np.asarray(_A , dtype=np.floataa ) for speech in speech]
elif not is_batched and not isinstance(_A , np.ndarray ):
__SCREAMING_SNAKE_CASE : Any = np.asarray(_A , dtype=np.floataa )
elif isinstance(_A , np.ndarray ) and speech.dtype is np.dtype(np.floataa ):
__SCREAMING_SNAKE_CASE : Tuple = speech.astype(np.floataa )
# always return batch
if not is_batched:
__SCREAMING_SNAKE_CASE : Optional[int] = [speech]
# needed to make pad() work on spectrogram inputs
__SCREAMING_SNAKE_CASE : Union[str, Any] = self.feature_size
# convert into correct format for padding
if is_target:
__SCREAMING_SNAKE_CASE : Tuple = [self._extract_mel_features(_A ) for waveform in speech]
__SCREAMING_SNAKE_CASE : Tuple = BatchFeature({'''input_values''': features} )
__SCREAMING_SNAKE_CASE : Any = self.num_mel_bins
else:
__SCREAMING_SNAKE_CASE : Dict = BatchFeature({'''input_values''': speech} )
__SCREAMING_SNAKE_CASE : Dict = self.pad(
_A , padding=_A , max_length=_A , truncation=_A , pad_to_multiple_of=_A , return_attention_mask=_A , **_A , )
__SCREAMING_SNAKE_CASE : List[Any] = feature_size_hack
# convert input values to correct format
__SCREAMING_SNAKE_CASE : str = padded_inputs['''input_values''']
if not isinstance(input_values[0] , np.ndarray ):
__SCREAMING_SNAKE_CASE : Any = [np.asarray(_A , dtype=np.floataa ) for array in input_values]
elif (
not isinstance(_A , np.ndarray )
and isinstance(input_values[0] , np.ndarray )
and input_values[0].dtype is np.dtype(np.floataa )
):
__SCREAMING_SNAKE_CASE : List[Any] = [array.astype(np.floataa ) for array in input_values]
elif isinstance(_A , np.ndarray ) and input_values.dtype is np.dtype(np.floataa ):
__SCREAMING_SNAKE_CASE : Any = input_values.astype(np.floataa )
# convert attention_mask to correct format
__SCREAMING_SNAKE_CASE : List[str] = padded_inputs.get('''attention_mask''' )
if attention_mask is not None:
__SCREAMING_SNAKE_CASE : Union[str, Any] = [np.asarray(_A , dtype=np.intaa ) for array in attention_mask]
# zero-mean and unit-variance normalization
if not is_target and self.do_normalize:
__SCREAMING_SNAKE_CASE : Optional[Any] = (
attention_mask
if self._get_padding_strategies(_A , max_length=_A ) is not PaddingStrategy.DO_NOT_PAD
else None
)
__SCREAMING_SNAKE_CASE : List[str] = self.zero_mean_unit_var_norm(
padded_inputs['''input_values'''] , attention_mask=_A , padding_value=self.padding_value )
if return_tensors is not None:
__SCREAMING_SNAKE_CASE : str = padded_inputs.convert_to_tensors(_A )
return padded_inputs
def UpperCAmelCase__ ( self : Union[str, Any] ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : str = super().to_dict()
# Don't serialize these as they are derived from the other properties.
__SCREAMING_SNAKE_CASE : int = ['''window''', '''mel_filters''', '''sample_size''', '''sample_stride''', '''n_fft''', '''n_freqs''']
for name in names:
if name in output:
del output[name]
return output
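# A standalone numeric check of the normalization in `zero_mean_unit_var_norm`
# above: x -> (x - mean(x)) / sqrt(var(x) + 1e-7), computed only over the
# unpadded length when an attention mask is provided.
import numpy as np
x = np.array([1.0, 2.0, 3.0, 4.0] )
normed = (x - x.mean()) / np.sqrt(x.var() + 1e-7 )
assert abs(normed.mean() ) < 1e-6 and abs(normed.std() - 1.0 ) < 1e-3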
| 74
|
import json
import os
import unittest
from transformers import OpenAIGPTTokenizer, OpenAIGPTTokenizerFast
from transformers.models.openai.tokenization_openai import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_spacy, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class A_ ( TokenizerTesterMixin , unittest.TestCase ):
'''simple docstring'''
    tokenizer_class = OpenAIGPTTokenizer
    rust_tokenizer_class = OpenAIGPTTokenizerFast
    test_rust_tokenizer = True
    test_seq2seq = False
def SCREAMING_SNAKE_CASE__ ( self ):
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
'l',
'o',
'w',
'e',
'r',
's',
't',
'i',
'd',
'n',
'w</w>',
'r</w>',
't</w>',
'lo',
'low',
'er</w>',
'low</w>',
'lowest</w>',
'newer</w>',
'wider</w>',
'<unk>',
]
        vocab_tokens = dict(zip(vocab , range(len(vocab ) ) ) )
        merges = ['#version: 0.2', 'l o', 'lo w', 'e r</w>', '']
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
        self.merges_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'] )
        with open(self.vocab_file , 'w' ) as fp:
            fp.write(json.dumps(vocab_tokens ) )
        with open(self.merges_file , 'w' ) as fp:
            fp.write('\n'.join(merges ) )
def SCREAMING_SNAKE_CASE__ ( self , snake_case ):
return "lower newer", "lower newer"
def SCREAMING_SNAKE_CASE__ ( self ):
        tokenizer = OpenAIGPTTokenizer(self.vocab_file , self.merges_file )
        text = 'lower'
        bpe_tokens = ['low', 'er</w>']
        tokens = tokenizer.tokenize(text )
        self.assertListEqual(tokens , bpe_tokens )
        input_tokens = tokens + ['<unk>']
        input_bpe_tokens = [14, 15, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens ) , input_bpe_tokens )
def SCREAMING_SNAKE_CASE__ ( self , snake_case=15 ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name , **kwargs )
# Simple input
lowercase = 'This is a simple input'
lowercase = ['This is a simple input 1', 'This is a simple input 2']
lowercase = ('This is a simple input', 'This is a pair')
lowercase = [
('This is a simple input 1', 'This is a simple input 2'),
('This is a simple pair 1', 'This is a simple pair 2'),
]
# Simple input tests
self.assertRaises(snake_case , tokenizer_r.encode , snake_case , max_length=snake_case , padding='max_length' )
# Simple input
self.assertRaises(snake_case , tokenizer_r.encode_plus , snake_case , max_length=snake_case , padding='max_length' )
# Simple input
self.assertRaises(
snake_case , tokenizer_r.batch_encode_plus , snake_case , max_length=snake_case , padding='max_length' , )
# Pair input
self.assertRaises(snake_case , tokenizer_r.encode , snake_case , max_length=snake_case , padding='max_length' )
# Pair input
self.assertRaises(snake_case , tokenizer_r.encode_plus , snake_case , max_length=snake_case , padding='max_length' )
# Pair input
self.assertRaises(
snake_case , tokenizer_r.batch_encode_plus , snake_case , max_length=snake_case , padding='max_length' , )
def SCREAMING_SNAKE_CASE__ ( self ):
pass
@require_ftfy
@require_spacy
@require_tokenizers
class OpenAIGPTTokenizationTestWithSpacy ( A_ ):
'''simple docstring'''
pass
| 84
| 0
|
'''simple docstring'''
import argparse
import os
import torch
from transformers import FlavaConfig, FlavaForPreTraining
from transformers.models.flava.convert_dalle_to_flava_codebook import convert_dalle_checkpoint
def __UpperCamelCase ( __lowerCamelCase : Optional[int] ) -> Dict:
'''simple docstring'''
return sum(param.float().sum() if "encoder.embeddings" not in key else 0 for key, param in state_dict.items() )
def upgrade_state_dict(state_dict, codebook_state_dict):
    upgrade = {}

    for key, value in state_dict.items():
        if "text_encoder.embeddings" in key or "image_encoder.embeddings" in key:
            continue

        key = key.replace("heads.cmd.mim_head.cls.predictions", "mmm_image_head")
        key = key.replace("heads.cmd.mlm_head.cls.predictions", "mmm_text_head")
        key = key.replace("heads.cmd.itm_head.cls", "itm_head")
        key = key.replace("heads.cmd.itm_head.pooler", "itm_head.pooler")
        key = key.replace("heads.cmd.clip_head.logit_scale", "flava.logit_scale")
        key = key.replace("heads.fairseq_mlm.cls.predictions", "mlm_head")
        key = key.replace("heads.imagenet.mim_head.cls.predictions", "mim_head")
        key = key.replace("mm_text_projection", "flava.text_to_mm_projection")
        key = key.replace("mm_image_projection", "flava.image_to_mm_projection")
        key = key.replace("image_encoder.module", "flava.image_model")
        key = key.replace("text_encoder.module", "flava.text_model")
        key = key.replace("mm_encoder.module.encoder.cls_token", "flava.multimodal_model.cls_token")
        key = key.replace("mm_encoder.module", "flava.multimodal_model")
        key = key.replace("text_projection", "flava.text_projection")
        key = key.replace("image_projection", "flava.image_projection")

        upgrade[key] = value.float()

    for key, value in codebook_state_dict.items():
        upgrade[f"image_codebook.{key}"] = value

    return upgrade
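
# Hedged illustration (not part of the original script): tracing one key through
# the rename chain in `upgrade_state_dict` above; the tensor value is a made-up
# placeholder.
#   {"heads.cmd.mlm_head.cls.predictions.bias": torch.zeros(1)}
#   -> {"mmm_text_head.bias": tensor([0.])}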
@torch.no_grad()
def convert_flava_checkpoint(checkpoint_path, codebook_path, pytorch_dump_folder_path, config_path=None):
    if config_path is not None:
        config = FlavaConfig.from_pretrained(config_path)
    else:
        config = FlavaConfig()

    hf_model = FlavaForPreTraining(config).eval()

    codebook_state_dict = convert_dalle_checkpoint(codebook_path, None, save_checkpoint=False)

    if os.path.exists(checkpoint_path):
        state_dict = torch.load(checkpoint_path, map_location="cpu")
    else:
        state_dict = torch.hub.load_state_dict_from_url(checkpoint_path, map_location="cpu")

    hf_state_dict = upgrade_state_dict(state_dict, codebook_state_dict)
    hf_model.load_state_dict(hf_state_dict)
    hf_state_dict = hf_model.state_dict()
    hf_count = count_parameters(hf_state_dict)
    state_dict_count = count_parameters(state_dict) + count_parameters(codebook_state_dict)

    assert torch.allclose(hf_count, state_dict_count, atol=1e-3)

    hf_model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to flava checkpoint")
parser.add_argument("--codebook_path", default=None, type=str, help="Path to flava codebook checkpoint")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    args = parser.parse_args()
convert_flava_checkpoint(args.checkpoint_path, args.codebook_path, args.pytorch_dump_folder_path, args.config_path)
| 276
|
'''simple docstring'''
MOD_ADLER = 65_521


def adler32(plain_text: str) -> int:
    """Compute the Adler-32 checksum of ``plain_text``."""
    a = 1
    b = 0
    for plain_chr in plain_text:
        a = (a + ord(plain_chr)) % MOD_ADLER
        b = (b + a) % MOD_ADLER
    return (b << 16) | a
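
# Hedged usage sketch (not part of the original module): "Wikipedia" is the
# classic Adler-32 reference input, with checksum 0x11E60398.
if __name__ == "__main__":
    assert adler32("Wikipedia") == 0x11E60398
    print(hex(adler32("Wikipedia")))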
| 276
| 1
|
import gc
import random
import unittest
import torch
from diffusers import (
    IFImg2ImgPipeline,
    IFImg2ImgSuperResolutionPipeline,
    IFInpaintingPipeline,
    IFInpaintingSuperResolutionPipeline,
    IFPipeline,
    IFSuperResolutionPipeline,
)
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import floats_tensor, load_numpy, require_torch_gpu, skip_mps, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
from . import IFPipelineTesterMixin
@skip_mps
class IFPipelineFastTests(PipelineTesterMixin, IFPipelineTesterMixin, unittest.TestCase):
    pipeline_class = IFPipeline
    params = TEXT_TO_IMAGE_PARAMS - {"width", "height", "latents"}
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}
    def get_dummy_components(self):
        return self._get_dummy_components()

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs
    def test_save_load_optional_components(self):
        self._test_save_load_optional_components()

    @unittest.skipIf(torch_device != "cuda", reason="float16 requires CUDA")
    def test_save_load_float16(self):
        # Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
        super().test_save_load_float16(expected_max_diff=1e-1)

    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(expected_max_diff=1e-2)

    def test_save_load_local(self):
        self._test_save_load_local()

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(expected_max_diff=1e-2)

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)
@slow
@require_torch_gpu
class IFPipelineSlowTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_all(self):
        # if
        pipe_1 = IFPipeline.from_pretrained("DeepFloyd/IF-I-XL-v1.0", variant="fp16", torch_dtype=torch.float16)
        pipe_2 = IFSuperResolutionPipeline.from_pretrained(
            "DeepFloyd/IF-II-L-v1.0", variant="fp16", torch_dtype=torch.float16, text_encoder=None, tokenizer=None
        )

        # pre compute text embeddings and remove T5 to save memory
        pipe_1.text_encoder.to("cuda")
        prompt_embeds, negative_prompt_embeds = pipe_1.encode_prompt("anime turtle", device="cuda")

        del pipe_1.tokenizer
        del pipe_1.text_encoder
        gc.collect()

        pipe_1.tokenizer = None
        pipe_1.text_encoder = None

        pipe_1.enable_model_cpu_offload()
        pipe_2.enable_model_cpu_offload()

        pipe_1.unet.set_attn_processor(AttnAddedKVProcessor())
        pipe_2.unet.set_attn_processor(AttnAddedKVProcessor())

        self._test_if(pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds)

        pipe_1.remove_all_hooks()
        pipe_2.remove_all_hooks()

        # img2img
        pipe_1 = IFImg2ImgPipeline(**pipe_1.components)
        pipe_2 = IFImg2ImgSuperResolutionPipeline(**pipe_2.components)

        pipe_1.enable_model_cpu_offload()
        pipe_2.enable_model_cpu_offload()

        pipe_1.unet.set_attn_processor(AttnAddedKVProcessor())
        pipe_2.unet.set_attn_processor(AttnAddedKVProcessor())

        self._test_if_img2img(pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds)

        pipe_1.remove_all_hooks()
        pipe_2.remove_all_hooks()

        # inpainting
        pipe_1 = IFInpaintingPipeline(**pipe_1.components)
        pipe_2 = IFInpaintingSuperResolutionPipeline(**pipe_2.components)

        pipe_1.enable_model_cpu_offload()
        pipe_2.enable_model_cpu_offload()

        pipe_1.unet.set_attn_processor(AttnAddedKVProcessor())
        pipe_2.unet.set_attn_processor(AttnAddedKVProcessor())

        self._test_if_inpainting(pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds)
    def _test_if(self, pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds):
        # pipeline 1
        _start_torch_memory_measurement()

        generator = torch.Generator(device="cpu").manual_seed(0)
        output = pipe_1(
            prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, num_inference_steps=2, generator=generator, output_type="np",
        )
        image = output.images[0]

        assert image.shape == (64, 64, 3)

        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 13 * 10**9

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if.npy"
        )
        assert_mean_pixel_difference(image, expected_image)

        # pipeline 2
        _start_torch_memory_measurement()

        generator = torch.Generator(device="cpu").manual_seed(0)
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(0)).to(torch_device)
        output = pipe_2(
            prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, image=image, generator=generator, num_inference_steps=2, output_type="np",
        )
        image = output.images[0]

        assert image.shape == (256, 256, 3)

        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 4 * 10**9

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_superresolution_stage_II.npy"
        )
        assert_mean_pixel_difference(image, expected_image)
    def _test_if_img2img(self, pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds):
        # pipeline 1
        _start_torch_memory_measurement()

        image = floats_tensor((1, 3, 64, 64), rng=random.Random(0)).to(torch_device)
        generator = torch.Generator(device="cpu").manual_seed(0)
        output = pipe_1(
            prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, image=image, num_inference_steps=2, generator=generator, output_type="np",
        )
        image = output.images[0]

        assert image.shape == (64, 64, 3)

        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 10 * 10**9

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img.npy"
        )
        assert_mean_pixel_difference(image, expected_image)

        # pipeline 2
        _start_torch_memory_measurement()

        generator = torch.Generator(device="cpu").manual_seed(0)
        original_image = floats_tensor((1, 3, 256, 256), rng=random.Random(0)).to(torch_device)
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(0)).to(torch_device)
        output = pipe_2(
            prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, image=image, original_image=original_image, generator=generator, num_inference_steps=2, output_type="np",
        )
        image = output.images[0]

        assert image.shape == (256, 256, 3)

        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 4 * 10**9

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img_superresolution_stage_II.npy"
        )
        assert_mean_pixel_difference(image, expected_image)
    def _test_if_inpainting(self, pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds):
        # pipeline 1
        _start_torch_memory_measurement()

        image = floats_tensor((1, 3, 64, 64), rng=random.Random(0)).to(torch_device)
        mask_image = floats_tensor((1, 3, 64, 64), rng=random.Random(1)).to(torch_device)
        generator = torch.Generator(device="cpu").manual_seed(0)
        output = pipe_1(
            prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, image=image, mask_image=mask_image, num_inference_steps=2, generator=generator, output_type="np",
        )
        image = output.images[0]

        assert image.shape == (64, 64, 3)

        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 10 * 10**9

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting.npy"
        )
        assert_mean_pixel_difference(image, expected_image)

        # pipeline 2
        _start_torch_memory_measurement()

        generator = torch.Generator(device="cpu").manual_seed(0)
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(0)).to(torch_device)
        original_image = floats_tensor((1, 3, 256, 256), rng=random.Random(0)).to(torch_device)
        mask_image = floats_tensor((1, 3, 256, 256), rng=random.Random(1)).to(torch_device)
        output = pipe_2(
            prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, image=image, mask_image=mask_image, original_image=original_image, generator=generator, num_inference_steps=2, output_type="np",
        )
        image = output.images[0]

        assert image.shape == (256, 256, 3)

        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 4 * 10**9

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting_superresolution_stage_II.npy"
        )
        assert_mean_pixel_difference(image, expected_image)
def _start_torch_memory_measurement():
    torch.cuda.empty_cache()
    torch.cuda.reset_max_memory_allocated()
    torch.cuda.reset_peak_memory_stats()
| 105
|
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPImageProcessor, CLIPVisionConfig, CLIPVisionModel
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEImg2ImgPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import floats_tensor, load_image, load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class ShapEImg2ImgPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = ShapEImg2ImgPipeline
    params = ["image"]
    batch_params = ["image"]
    required_optional_params = [
        "num_images_per_prompt",
        "num_inference_steps",
        "generator",
        "latents",
        "guidance_scale",
        "frame_size",
        "output_type",
        "return_dict",
    ]
    test_gradient_checkpointing = False
    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def renderer_dim(self):
        return 8

    @property
    def dummy_image_encoder(self):
        torch.manual_seed(0)
        config = CLIPVisionConfig(
            hidden_size=self.text_embedder_hidden_size, image_size=64, projection_dim=self.text_embedder_hidden_size, intermediate_size=37, num_attention_heads=4, num_channels=3, num_hidden_layers=5, patch_size=1,
        )
        model = CLIPVisionModel(config)
        return model

    @property
    def dummy_image_processor(self):
        image_processor = CLIPImageProcessor(
            crop_size=224, do_center_crop=True, do_normalize=True, do_resize=True, image_mean=[0.48145466, 0.4578275, 0.40821073], image_std=[0.26862954, 0.26130258, 0.27577711], resample=3, size=224,
        )
        return image_processor
    @property
    def dummy_prior(self):
        torch.manual_seed(0)
        model_kwargs = {
            "num_attention_heads": 2,
            "attention_head_dim": 16,
            "embedding_dim": self.time_input_dim,
            "num_embeddings": 32,
            "embedding_proj_dim": self.text_embedder_hidden_size,
            "time_embed_dim": self.time_embed_dim,
            "num_layers": 1,
            "clip_embed_dim": self.time_input_dim * 2,
            "additional_embeddings": 0,
            "time_embed_act_fn": "gelu",
            "norm_in_type": "layer",
            "embedding_proj_norm_type": "layer",
            "encoder_hid_proj_type": None,
            "added_emb_type": None,
        }
        model = PriorTransformer(**model_kwargs)
        return model

    @property
    def dummy_renderer(self):
        torch.manual_seed(0)
        model_kwargs = {
            "param_shapes": (
                (self.renderer_dim, 93),
                (self.renderer_dim, 8),
                (self.renderer_dim, 8),
                (self.renderer_dim, 8),
            ),
            "d_latent": self.time_input_dim,
            "d_hidden": self.renderer_dim,
            "n_output": 12,
            "background": (
                0.1,
                0.1,
                0.1,
            ),
        }
        model = ShapERenderer(**model_kwargs)
        return model
    def get_dummy_components(self):
        prior = self.dummy_prior
        image_encoder = self.dummy_image_encoder
        image_processor = self.dummy_image_processor
        renderer = self.dummy_renderer
        scheduler = HeunDiscreteScheduler(
            beta_schedule="exp", num_train_timesteps=1024, prediction_type="sample", use_karras_sigmas=True, clip_sample=True, clip_sample_range=1.0,
        )
        components = {
            "prior": prior,
            "image_encoder": image_encoder,
            "image_processor": image_processor,
            "renderer": renderer,
            "scheduler": scheduler,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        input_image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "image": input_image,
            "generator": generator,
            "num_inference_steps": 1,
            "frame_size": 32,
            "output_type": "np",
        }
        return inputs
    def test_shap_e(self):
        device = "cpu"

        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.images[0]
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (20, 32, 32, 3)

        expected_slice = np.array(
            [
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
            ]
        )

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    def test_inference_batch_consistent(self):
        # NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches
        self._test_inference_batch_consistent(batch_sizes=[1, 2])

    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device == "cpu"
        relax_max_difference = True

        self._test_inference_batch_single_identical(
            batch_size=2, test_max_difference=test_max_difference, relax_max_difference=relax_max_difference,
        )

    def test_num_images_per_prompt(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        batch_size = 1
        num_images_per_prompt = 2

        inputs = self.get_dummy_inputs(torch_device)
        for key in inputs.keys():
            if key in self.batch_params:
                inputs[key] = batch_size * [inputs[key]]

        images = pipe(**inputs, num_images_per_prompt=num_images_per_prompt)[0]

        assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class ShapEImg2ImgPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_shap_e_img2img(self):
        input_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/shap_e/corgi.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/shap_e/test_shap_e_img2img_out.npy"
        )
        pipe = ShapEImg2ImgPipeline.from_pretrained("openai/shap-e-img2img")
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.Generator(device=torch_device).manual_seed(0)

        images = pipe(
            input_image, generator=generator, guidance_scale=3.0, num_inference_steps=64, frame_size=64, output_type="np",
        ).images[0]

        assert images.shape == (20, 64, 64, 3)

        assert_mean_pixel_difference(images, expected_image)
| 544
| 0
|
import math
import flax.linen as nn
import jax.numpy as jnp
def get_sinusoidal_embeddings(
    timesteps,
    embedding_dim,
    freq_shift=1,
    min_timescale=1,
    max_timescale=1.0e4,
    flip_sin_to_cos=False,
    scale=1.0,
):
    assert timesteps.ndim == 1, "Timesteps should be a 1d-array"
    assert embedding_dim % 2 == 0, f"Embedding dimension {embedding_dim} should be even"
    num_timescales = float(embedding_dim // 2)
    log_timescale_increment = math.log(max_timescale / min_timescale) / (num_timescales - freq_shift)
    inv_timescales = min_timescale * jnp.exp(jnp.arange(num_timescales, dtype=jnp.float32) * -log_timescale_increment)
    emb = jnp.expand_dims(timesteps, 1) * jnp.expand_dims(inv_timescales, 0)

    # scale embeddings
    scaled_time = scale * emb

    if flip_sin_to_cos:
        signal = jnp.concatenate([jnp.cos(scaled_time), jnp.sin(scaled_time)], axis=1)
    else:
        signal = jnp.concatenate([jnp.sin(scaled_time), jnp.cos(scaled_time)], axis=1)
    signal = jnp.reshape(signal, [jnp.shape(timesteps)[0], embedding_dim])
    return signal
class FlaxTimestepEmbedding(nn.Module):
    """Projects timestep features to a `time_embed_dim`-sized vector with two dense layers."""

    time_embed_dim: int = 32
    dtype: jnp.dtype = jnp.float32

    @nn.compact
    def __call__(self, temb):
        temb = nn.Dense(self.time_embed_dim, dtype=self.dtype, name="linear_1")(temb)
        temb = nn.silu(temb)
        temb = nn.Dense(self.time_embed_dim, dtype=self.dtype, name="linear_2")(temb)
        return temb


class FlaxTimesteps(nn.Module):
    """Wraps `get_sinusoidal_embeddings` as a Flax module."""

    dim: int = 32
    flip_sin_to_cos: bool = False
    freq_shift: float = 1

    @nn.compact
    def __call__(self, timesteps):
        return get_sinusoidal_embeddings(
            timesteps, embedding_dim=self.dim, flip_sin_to_cos=self.flip_sin_to_cos, freq_shift=self.freq_shift
        )
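
# Hedged usage sketch (assumption, not part of the original module): embedding a
# small batch of diffusion timesteps with the function defined above.
if __name__ == "__main__":
    example_timesteps = jnp.arange(4, dtype=jnp.float32)
    example_emb = get_sinusoidal_embeddings(example_timesteps, embedding_dim=32)
    print(example_emb.shape)  # (4, 32)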
| 423
|
import heapq
import sys
import numpy as np
TPos = tuple[int, int]


class PriorityQueue:
    """Min-heap priority queue that keeps at most one entry per item."""

    def __init__(self):
        self.elements = []
        self.set = set()

    def minkey(self):
        if not self.empty():
            return self.elements[0][0]
        else:
            return float('inf')

    def empty(self):
        return len(self.elements) == 0

    def put(self, item, priority):
        if item not in self.set:
            heapq.heappush(self.elements, (priority, item))
            self.set.add(item)
        else:
            # update
            # print("update", item)
            temp = []
            (pri, x) = heapq.heappop(self.elements)
            while x != item:
                temp.append((pri, x))
                (pri, x) = heapq.heappop(self.elements)
            temp.append((priority, item))
            for pro, xxx in temp:
                heapq.heappush(self.elements, (pro, xxx))

    def remove_element(self, item):
        if item in self.set:
            self.set.remove(item)
            temp = []
            (pro, x) = heapq.heappop(self.elements)
            while x != item:
                temp.append((pro, x))
                (pro, x) = heapq.heappop(self.elements)
            for prito, yyy in temp:
                heapq.heappush(self.elements, (prito, yyy))

    def top_show(self):
        return self.elements[0][1]

    def get(self):
        (priority, item) = heapq.heappop(self.elements)
        self.set.remove(item)
        return (priority, item)
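
# Hedged usage sketch (not part of the original module): the queue keeps at most
# one entry per item, and a second `put` of the same item re-prioritises it.
if __name__ == "__main__":
    _demo_pq = PriorityQueue()
    _demo_pq.put((0, 0), 3)
    _demo_pq.put((1, 1), 1)
    _demo_pq.put((0, 0), 0)  # update: (0, 0) now outranks (1, 1)
    print(_demo_pq.get())  # (0, (0, 0))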
def consistent_heuristic(p: TPos, goal: TPos):
    # euclidean distance
    a = np.array(p)
    b = np.array(goal)
    return np.linalg.norm(a - b)


def heuristic_2(p: TPos, goal: TPos):
    # integer division by time variable
    return consistent_heuristic(p, goal) // t


def heuristic_1(p: TPos, goal: TPos):
    # manhattan distance
    return abs(p[0] - goal[0]) + abs(p[1] - goal[1])


def key(start: TPos, i: int, goal: TPos, g_function: dict):
    ans = g_function[start] + W1 * heuristics[i](start, goal)
    return ans
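
# Hedged illustration (not part of the original module): the three heuristics
# evaluated on p = (0, 0), goal = (3, 4) with the time variable t = 1:
#   consistent_heuristic -> 5.0  (Euclidean)
#   heuristic_1          -> 7    (Manhattan)
#   heuristic_2          -> 5.0  (Euclidean floor-divided by t)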
def do_something(back_pointer, goal, start):
    grid = np.chararray((n, n))
    for i in range(n):
        for j in range(n):
            grid[i][j] = '*'

    for i in range(n):
        for j in range(n):
            if (j, (n - 1) - i) in blocks:
                grid[i][j] = '#'

    grid[0][(n - 1)] = '-'
    x = back_pointer[goal]
    while x != start:
        (x_c, y_c) = x
        # print(x)
        grid[(n - 1) - y_c][x_c] = '-'
        x = back_pointer[x]
    grid[(n - 1)][0] = '-'

    for i in range(n):
        for j in range(n):
            if (i, j) == (0, n - 1):
                print(grid[i][j], end=' ')
                print('<-- End position', end=' ')
            else:
                print(grid[i][j], end=' ')
        print()
    print('^')
    print('Start position')
    print()
    print('# is an obstacle')
    print('- is the path taken by algorithm')
    print('PATH TAKEN BY THE ALGORITHM IS:-')
    x = back_pointer[goal]
    while x != start:
        print(x, end=' ')
        x = back_pointer[x]
    print(x)
    sys.exit()
def valid(p: TPos):
    if p[0] < 0 or p[0] > n - 1:
        return False
    if p[1] < 0 or p[1] > n - 1:
        return False
    return True
def expand_state(s, j, visited, g_function, close_list_anchor, close_list_inad, open_list, back_pointer):
    for itera in range(n_heuristic):
        open_list[itera].remove_element(s)
    # print("s", s)
    # print("j", j)
    (x, y) = s
    left = (x - 1, y)
    right = (x + 1, y)
    up = (x, y + 1)
    down = (x, y - 1)

    for neighbours in [left, right, up, down]:
        if neighbours not in blocks:
            if valid(neighbours) and neighbours not in visited:
                # print("neighbour", neighbours)
                visited.add(neighbours)
                back_pointer[neighbours] = -1
                g_function[neighbours] = float('inf')

                if valid(neighbours) and g_function[neighbours] > g_function[s] + 1:
                    g_function[neighbours] = g_function[s] + 1
                    back_pointer[neighbours] = s
                    if neighbours not in close_list_anchor:
                        open_list[0].put(neighbours, key(neighbours, 0, goal, g_function))
                        if neighbours not in close_list_inad:
                            for var in range(1, n_heuristic):
                                if key(neighbours, var, goal, g_function) <= W2 * key(neighbours, 0, goal, g_function):
                                    open_list[j].put(neighbours, key(neighbours, var, goal, g_function))
def make_common_ground():
    some_list = []
for x in range(1 , 5 ):
for y in range(1 , 6 ):
some_list.append((x, y) )
for x in range(15 , 20 ):
some_list.append((x, 17) )
for x in range(10 , 19 ):
for y in range(1 , 15 ):
some_list.append((x, y) )
# L block
for x in range(1 , 4 ):
for y in range(12 , 19 ):
some_list.append((x, y) )
for x in range(3 , 13 ):
for y in range(16 , 19 ):
some_list.append((x, y) )
return some_list
heuristics = {0: consistent_heuristic, 1: heuristic_1, 2: heuristic_2}
blocks_blk = [
(0, 1),
(1, 1),
(2, 1),
(3, 1),
(4, 1),
(5, 1),
(6, 1),
(7, 1),
(8, 1),
(9, 1),
(10, 1),
(11, 1),
(12, 1),
(13, 1),
(14, 1),
(15, 1),
(16, 1),
(17, 1),
(18, 1),
(19, 1),
]
blocks_all = make_common_ground()
blocks = blocks_blk
# hyper parameters
W1 = 1
W2 = 1
n = 20
n_heuristic = 3  # one consistent and two other inconsistent

# start and end destination
start = (0, 0)
goal = (n - 1, n - 1)

t = 1
def multi_a_star(start: TPos, goal: TPos, n_heuristic: int):
    g_function = {start: 0, goal: float('inf')}
    back_pointer = {start: -1, goal: -1}
    open_list = []
    visited = set()

    for i in range(n_heuristic):
        open_list.append(PriorityQueue())
        open_list[i].put(start, key(start, i, goal, g_function))

    close_list_anchor: list[int] = []
    close_list_inad: list[int] = []
    while open_list[0].minkey() < float('inf'):
        for i in range(1, n_heuristic):
            # print(open_list[0].minkey(), open_list[i].minkey())
            if open_list[i].minkey() <= W2 * open_list[0].minkey():
                global t
                t += 1
                if g_function[goal] <= open_list[i].minkey():
                    if g_function[goal] < float('inf'):
                        do_something(back_pointer, goal, start)
                else:
                    get_s = open_list[i].top_show()
                    visited.add(get_s)
                    expand_state(
                        get_s, i, visited, g_function, close_list_anchor, close_list_inad, open_list, back_pointer,
                    )
                    close_list_inad.append(get_s)
            else:
                if g_function[goal] <= open_list[0].minkey():
                    if g_function[goal] < float('inf'):
                        do_something(back_pointer, goal, start)
                else:
                    get_s = open_list[0].top_show()
                    visited.add(get_s)
                    expand_state(
                        get_s, 0, visited, g_function, close_list_anchor, close_list_inad, open_list, back_pointer,
                    )
                    close_list_anchor.append(get_s)
print('''No path found to goal''' )
print()
for i in range(n - 1 , -1 , -1 ):
for j in range(lowerCamelCase_ ):
if (j, i) in blocks:
print('''#''' , end=''' ''' )
elif (j, i) in back_pointer:
if (j, i) == (n - 1, n - 1):
print('''*''' , end=''' ''' )
else:
print('''-''' , end=''' ''' )
else:
print('''*''' , end=''' ''' )
if (j, i) == (n - 1, n - 1):
print('''<-- End position''' , end=''' ''' )
print()
print('''^''' )
print('''Start position''' )
print()
print('''# is an obstacle''' )
print('''- is the path taken by algorithm''' )
if __name__ == "__main__":
multi_a_star(start, goal, n_heuristic)
| 423
| 1
|