| code | code_codestyle | style_context | style_context_codestyle | label |
|---|---|---|---|---|
| stringlengths 82–53.2k | int64 0–721 | stringlengths 91–41.9k | int64 0–699 | int64 0–1 |
import gc
import unittest

import numpy as np
import torch
from torch.backends.cuda import sdp_kernel

from diffusers import (
    CMStochasticIterativeScheduler,
    ConsistencyModelPipeline,
    UNet2DModel,
)
from diffusers.utils import randn_tensor, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_2, require_torch_gpu

from ..pipeline_params import UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS, UNCONDITIONAL_IMAGE_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin


enable_full_determinism()


class ConsistencyModelPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = ConsistencyModelPipeline
    params = UNCONDITIONAL_IMAGE_GENERATION_PARAMS
    batch_params = UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS

    # Override required_optional_params to remove num_images_per_prompt
    required_optional_params = frozenset(
        [
            "num_inference_steps",
            "generator",
            "latents",
            "output_type",
            "return_dict",
            "callback",
            "callback_steps",
        ]
    )

    @property
    def dummy_uncond_unet(self):
        unet = UNet2DModel.from_pretrained(
            "diffusers/consistency-models-test",
            subfolder="test_unet",
        )
        return unet

    @property
    def dummy_cond_unet(self):
        unet = UNet2DModel.from_pretrained(
            "diffusers/consistency-models-test",
            subfolder="test_unet_class_cond",
        )
        return unet

    def get_dummy_components(self, class_cond=False):
        if class_cond:
            unet = self.dummy_cond_unet
        else:
            unet = self.dummy_uncond_unet

        # Default to CM multistep sampler
        scheduler = CMStochasticIterativeScheduler(
            num_train_timesteps=40,
            sigma_min=0.002,
            sigma_max=80.0,
        )

        components = {
            "unet": unet,
            "scheduler": scheduler,
        }

        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        inputs = {
            "batch_size": 1,
            "num_inference_steps": None,
            "timesteps": [22, 0],
            "generator": generator,
            "output_type": "np",
        }

        return inputs

    def test_consistency_model_pipeline_multistep(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        pipe = ConsistencyModelPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        assert image.shape == (1, 32, 32, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.3572, 0.6273, 0.4031, 0.3961, 0.4321, 0.5730, 0.5266, 0.4780, 0.5004])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_consistency_model_pipeline_multistep_class_cond(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components(class_cond=True)
        pipe = ConsistencyModelPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs["class_labels"] = 0
        image = pipe(**inputs).images
        assert image.shape == (1, 32, 32, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.3572, 0.6273, 0.4031, 0.3961, 0.4321, 0.5730, 0.5266, 0.4780, 0.5004])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_consistency_model_pipeline_onestep(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        pipe = ConsistencyModelPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs["num_inference_steps"] = 1
        inputs["timesteps"] = None
        image = pipe(**inputs).images
        assert image.shape == (1, 32, 32, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.5004, 0.5004, 0.4994, 0.5008, 0.4976, 0.5018, 0.4990, 0.4982, 0.4987])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_consistency_model_pipeline_onestep_class_cond(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components(class_cond=True)
        pipe = ConsistencyModelPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs["num_inference_steps"] = 1
        inputs["timesteps"] = None
        inputs["class_labels"] = 0
        image = pipe(**inputs).images
        assert image.shape == (1, 32, 32, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.5004, 0.5004, 0.4994, 0.5008, 0.4976, 0.5018, 0.4990, 0.4982, 0.4987])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3


@slow
@require_torch_gpu
class ConsistencyModelPipelineSlowTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, seed=0, get_fixed_latents=False, device="cpu", dtype=torch.float32, shape=(1, 3, 64, 64)):
        generator = torch.manual_seed(seed)

        inputs = {
            "num_inference_steps": None,
            "timesteps": [22, 0],
            "class_labels": 0,
            "generator": generator,
            "output_type": "np",
        }

        if get_fixed_latents:
            latents = self.get_fixed_latents(seed=seed, device=device, dtype=dtype, shape=shape)
            inputs["latents"] = latents

        return inputs

    def get_fixed_latents(self, seed=0, device="cpu", dtype=torch.float32, shape=(1, 3, 64, 64)):
        if type(device) == str:
            device = torch.device(device)
        generator = torch.Generator(device=device).manual_seed(seed)
        latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        return latents

    def test_consistency_model_cd_multistep(self):
        unet = UNet2DModel.from_pretrained("diffusers/consistency_models", subfolder="diffusers_cd_imagenet64_l2")
        scheduler = CMStochasticIterativeScheduler(
            num_train_timesteps=40,
            sigma_min=0.002,
            sigma_max=80.0,
        )
        pipe = ConsistencyModelPipeline(unet=unet, scheduler=scheduler)
        pipe.to(torch_device=torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs()
        image = pipe(**inputs).images
        assert image.shape == (1, 64, 64, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.0888, 0.0881, 0.0666, 0.0479, 0.0292, 0.0195, 0.0201, 0.0163, 0.0254])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2

    def test_consistency_model_cd_onestep(self):
        unet = UNet2DModel.from_pretrained("diffusers/consistency_models", subfolder="diffusers_cd_imagenet64_l2")
        scheduler = CMStochasticIterativeScheduler(
            num_train_timesteps=40,
            sigma_min=0.002,
            sigma_max=80.0,
        )
        pipe = ConsistencyModelPipeline(unet=unet, scheduler=scheduler)
        pipe.to(torch_device=torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs()
        inputs["num_inference_steps"] = 1
        inputs["timesteps"] = None
        image = pipe(**inputs).images
        assert image.shape == (1, 64, 64, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.0340, 0.0152, 0.0063, 0.0267, 0.0221, 0.0107, 0.0416, 0.0186, 0.0217])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2

    @require_torch_2
    def test_consistency_model_cd_multistep_flash_attn(self):
        unet = UNet2DModel.from_pretrained("diffusers/consistency_models", subfolder="diffusers_cd_imagenet64_l2")
        scheduler = CMStochasticIterativeScheduler(
            num_train_timesteps=40,
            sigma_min=0.002,
            sigma_max=80.0,
        )
        pipe = ConsistencyModelPipeline(unet=unet, scheduler=scheduler)
        pipe.to(torch_device=torch_device, torch_dtype=torch.float16)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(get_fixed_latents=True, device=torch_device)
        # Ensure usage of flash attention in torch 2.0
        with sdp_kernel(enable_flash=True, enable_math=False, enable_mem_efficient=False):
            image = pipe(**inputs).images
        assert image.shape == (1, 64, 64, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.1875, 0.1428, 0.1289, 0.2151, 0.2092, 0.1477, 0.1877, 0.1641, 0.1353])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    @require_torch_2
    def test_consistency_model_cd_onestep_flash_attn(self):
        unet = UNet2DModel.from_pretrained("diffusers/consistency_models", subfolder="diffusers_cd_imagenet64_l2")
        scheduler = CMStochasticIterativeScheduler(
            num_train_timesteps=40,
            sigma_min=0.002,
            sigma_max=80.0,
        )
        pipe = ConsistencyModelPipeline(unet=unet, scheduler=scheduler)
        pipe.to(torch_device=torch_device, torch_dtype=torch.float16)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(get_fixed_latents=True, device=torch_device)
        inputs["num_inference_steps"] = 1
        inputs["timesteps"] = None
        # Ensure usage of flash attention in torch 2.0
        with sdp_kernel(enable_flash=True, enable_math=False, enable_mem_efficient=False):
            image = pipe(**inputs).images
        assert image.shape == (1, 64, 64, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.1663, 0.1948, 0.2275, 0.1680, 0.1204, 0.1245, 0.1858, 0.1338, 0.2095])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
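# Illustrative usage sketch (not part of the test file above): driving the same
# consistency-model checkpoint and [22, 0] timestep schedule the slow tests use,
# outside of unittest. Assumes a CUDA device; swap "cuda" for "cpu" otherwise.
import torch

from diffusers import CMStochasticIterativeScheduler, ConsistencyModelPipeline, UNet2DModel

unet = UNet2DModel.from_pretrained("diffusers/consistency_models", subfolder="diffusers_cd_imagenet64_l2")
scheduler = CMStochasticIterativeScheduler(num_train_timesteps=40, sigma_min=0.002, sigma_max=80.0)
pipe = ConsistencyModelPipeline(unet=unet, scheduler=scheduler).to("cuda")

# Multistep consistency sampling; class_labels=0 mirrors the class-conditional tests.
image = pipe(num_inference_steps=None, timesteps=[22, 0], class_labels=0, output_type="np").images
print(image.shape)  # (1, 64, 64, 3)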
import json
import multiprocessing as mp
import re
from collections import defaultdict
from functools import partial
from typing import Dict, List, Optional, Set, Tuple, Type

from datasets import Dataset
from datasketch import MinHash, MinHashLSH
from dpu_utils.utils.iterators import ThreadedIterator
from tqdm import tqdm


NON_ALPHA = re.compile("[^A-Za-z_0-9]")
# parameters used in DuplicationIndex
MIN_NUM_TOKENS = 10
NUM_PERM = 256


def get_min_hash(tokens: List[str]) -> Optional[MinHash]:
    """Compute the MinHash of a code snippet; snippets with fewer than MIN_NUM_TOKENS tokens are skipped."""
    if len(tokens) < MIN_NUM_TOKENS:
        return None
    min_hash = MinHash(num_perm=NUM_PERM)
    for token in set(tokens):
        min_hash.update(token.encode())
    return min_hash


def get_tokens(code: str) -> Set[str]:
    """Tokenize a code snippet on non-alphanumeric characters."""
    return {t for t in NON_ALPHA.split(code) if len(t.strip()) > 0}


class DuplicationIndex:
    def __init__(
        self,
        *,
        duplication_jaccard_threshold: float = 0.85,
    ):
        self._duplication_jaccard_threshold = duplication_jaccard_threshold
        self._num_perm = NUM_PERM
        self._index = MinHashLSH(threshold=self._duplication_jaccard_threshold, num_perm=self._num_perm)

        self._duplicate_clusters = defaultdict(set)

    def add(self, code_key: Tuple, min_hash: MinHash) -> None:
        """Add a key to the MinHashLSH index and attach it to an existing duplicate cluster if one matches."""
        close_duplicates = self._index.query(min_hash)
        if code_key in self._index.keys:
            print(f"Duplicate key {code_key}")
            return

        self._index.insert(code_key, min_hash)
        if len(close_duplicates) > 0:
            for base_duplicate in close_duplicates:
                if base_duplicate in self._duplicate_clusters:
                    self._duplicate_clusters[base_duplicate].add(code_key)
                    break
            else:
                self._duplicate_clusters[close_duplicates[0]].add(code_key)

    def get_duplicate_clusters(self) -> List[List[Dict]]:
        duplicate_clusters = []
        for base, duplicates in self._duplicate_clusters.items():
            cluster = [base] + list(duplicates)
            # reformat the cluster to be a list of dict
            cluster = [{"base_index": el[0], "repo_name": el[1], "path": el[2]} for el in cluster]
            duplicate_clusters.append(cluster)
        return duplicate_clusters

    def save(self, filepath) -> None:
        duplicate_clusters = self.get_duplicate_clusters()
        with open(filepath, "w") as f:
            json.dump(duplicate_clusters, f)


def _compute_min_hash(element):
    index, data = element
    min_hash = get_min_hash([t for t in NON_ALPHA.split(data["content"]) if len(t.strip()) > 0])
    if min_hash is not None:
        return (index, data["repo_name"], data["path"]), min_hash


def minhash_iter(dataset_iterator: Type[Dataset]):
    with mp.Pool() as pool:
        for data in pool.imap_unordered(
            _compute_min_hash,
            ThreadedIterator(dataset_iterator, max_queue_size=10000),
            chunksize=100,
        ):
            if data is not None:
                yield data


def make_duplicate_clusters(dataset_iterator: Type[Dataset], jaccard_threshold: float):
    """Compute MinHashes in parallel and group near-duplicate files into clusters."""
    di = DuplicationIndex(duplication_jaccard_threshold=jaccard_threshold)

    for filename, min_hash in tqdm(ThreadedIterator(minhash_iter(enumerate(dataset_iterator)), max_queue_size=100)):
        di.add(filename, min_hash)

    # Returns a List[Cluster] where Cluster is List[str] with the filenames.
    return di.get_duplicate_clusters()


def jaccard_similarity(code1: str, code2: str) -> float:
    """Compute the Jaccard similarity of two code snippets."""
    tokens1 = get_tokens(code1)
    tokens2 = get_tokens(code2)
    return len(tokens1 & tokens2) / len(tokens1 | tokens2)


_shared_dataset = None


def _find_cluster_extremes_shared(cluster, jaccard_threshold):
    """Find the "extremes" of a cluster: a reduced set of elements that covers the whole cluster."""
    extremes = []
    for element1 in cluster:
        code1 = _shared_dataset[element1["base_index"]]["content"]
        for element2 in extremes:
            code2 = _shared_dataset[element2["base_index"]]["content"]
            if jaccard_similarity(code1, code2) >= jaccard_threshold:
                element2["copies"] += 1
                break
        else:
            element1["copies"] = 1
            extremes.append(element1)
    return extremes


def find_extremes(cluster_list, dataset, jaccard_threshold):
    """Call _find_cluster_extremes_shared on all clusters in parallel, sharing the dataset via a global."""
    global _shared_dataset
    _shared_dataset = dataset
    extremes_list = []
    f = partial(_find_cluster_extremes_shared, jaccard_threshold=jaccard_threshold)
    with mp.Pool() as pool:
        for extremes in tqdm(
            pool.imap_unordered(
                f,
                cluster_list,
            ),
            total=len(cluster_list),
        ):
            extremes_list.append(extremes)
    return extremes_list


def deduplicate_dataset(
    dataset: Type[Dataset], jaccard_threshold: float = 0.85
) -> Tuple[Type[Dataset], List[List[Dict]]]:
    """Deduplicate the dataset: keep only cluster extremes, filter out the remaining duplicates."""
    duplicate_clusters = make_duplicate_clusters(dataset, jaccard_threshold)
    duplicate_indices = {x["base_index"] for cluster in duplicate_clusters for x in cluster}
    extreme_dict = {}
    extremes_clusters = find_extremes(duplicate_clusters, dataset, jaccard_threshold)
    for extremes in extremes_clusters:
        for element in extremes:
            extreme_dict[element["base_index"]] = element
    remove_indices = duplicate_indices - set(extreme_dict.keys())
    ds_filter = dataset.filter(lambda x, idx: idx not in remove_indices, with_indices=True)

    # update duplicate_clusters
    for cluster in duplicate_clusters:
        for element in cluster:
            element["is_extreme"] = element["base_index"] in extreme_dict
            if element["is_extreme"]:
                element["copies"] = extreme_dict[element["base_index"]]["copies"]

    print(f"Original dataset size: {len(dataset)}")
    print(f"Number of duplicate clusters: {len(duplicate_clusters)}")
    print(f"Files in duplicate cluster: {len(duplicate_indices)}")
    print(f"Unique files in duplicate cluster: {len(extreme_dict)}")
    print(f"Filtered dataset size: {len(ds_filter)}")

    return ds_filter, duplicate_clusters
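# Illustrative sketch (not part of the script above): calling deduplicate_dataset on a
# tiny in-memory `datasets.Dataset` with the "content", "repo_name" and "path" columns
# the script expects. Snippets shorter than MIN_NUM_TOKENS (10) get no MinHash and are
# never clustered, hence the padded toy contents; the main guard keeps multiprocessing safe.
if __name__ == "__main__":
    from datasets import Dataset

    toy = Dataset.from_dict(
        {
            "content": ["def add(a, b):\n    return a + b\n" * 5] * 2 + ["print('hi')"],
            "repo_name": ["repo_a", "repo_b", "repo_c"],
            "path": ["a.py", "b.py", "c.py"],
        }
    )
    deduped, clusters = deduplicate_dataset(toy, jaccard_threshold=0.85)
    print(len(deduped), clusters)  # the exact duplicate pair collapses to one row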
from __future__ import annotations

from collections import deque
from collections.abc import Iterator
from dataclasses import dataclass


@dataclass
class Edge:
    """Weighted graph edge."""

    destination_vertex: int
    weight: int


class AdjacencyList:
    """Graph adjacency list supporting 0-1 BFS shortest paths."""

    def __init__(self, size: int):
        self._graph: list[list[Edge]] = [[] for _ in range(size)]
        self._size = size

    def __getitem__(self, vertex: int) -> Iterator[Edge]:
        """Get all the vertices adjacent to the given one."""
        return iter(self._graph[vertex])

    @property
    def size(self):
        return self._size

    def add_edge(self, from_vertex: int, to_vertex: int, weight: int):
        if weight not in (0, 1):
            raise ValueError("Edge weight must be either 0 or 1.")

        if to_vertex < 0 or to_vertex >= self.size:
            raise ValueError("Vertex indexes must be in [0; size).")

        self._graph[from_vertex].append(Edge(to_vertex, weight))

    def get_shortest_path(self, start_vertex: int, finish_vertex: int) -> int:
        """0-1 BFS: weight-0 edges are pushed to the front of the deque, weight-1 edges to the back."""
        queue = deque([start_vertex])
        distances: list[int | None] = [None] * self.size
        distances[start_vertex] = 0

        while queue:
            current_vertex = queue.popleft()
            current_distance = distances[current_vertex]
            if current_distance is None:
                continue

            for edge in self[current_vertex]:
                new_distance = current_distance + edge.weight
                dest_vertex_distance = distances[edge.destination_vertex]
                if (
                    isinstance(dest_vertex_distance, int)
                    and new_distance >= dest_vertex_distance
                ):
                    continue
                distances[edge.destination_vertex] = new_distance
                if edge.weight == 0:
                    queue.appendleft(edge.destination_vertex)
                else:
                    queue.append(edge.destination_vertex)

        if distances[finish_vertex] is None:
            raise ValueError("No path from start_vertex to finish_vertex.")

        return distances[finish_vertex]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
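# Illustrative usage of the 0-1 BFS above (the graph is made up for demonstration):
# because weight-0 edges jump the queue, every vertex is settled at its shortest
# distance in O(V + E), without a priority queue.
g = AdjacencyList(4)
g.add_edge(0, 1, 0)
g.add_edge(1, 2, 1)
g.add_edge(0, 3, 1)
g.add_edge(3, 2, 0)
print(g.get_shortest_path(0, 2))  # 1, e.g. via 0 -> 1 -> 2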
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxSeq2SeqConfigWithPast
from ...utils import logging


if TYPE_CHECKING:
    from ...feature_extraction_utils import FeatureExtractionMixin
    from ...tokenization_utils_base import PreTrainedTokenizerBase
    from ...utils import TensorType

logger = logging.get_logger(__name__)

WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "openai/whisper-base": "https://huggingface.co/openai/whisper-base/resolve/main/config.json",
}

# fmt: off
NON_SPEECH_TOKENS = [
    1, 2, 7, 8, 9, 10, 14, 25,
    26, 27, 28, 29, 31, 58, 59, 60, 61, 62,
    63, 90, 91, 92, 93, 357, 366, 438, 532, 685,
    705, 796, 930, 1058, 1220, 1267, 1279, 1303, 1343, 1377,
    1391, 1635, 1782, 1875, 2162, 2361, 2488, 3467, 4008, 4211,
    4600, 4808, 5299, 5855, 6329, 7203, 9609, 9959, 10563, 10786,
    11420, 11709, 11907, 13163, 13697, 13700, 14808, 15306, 16410, 16791,
    17992, 19203, 19510, 20724, 22305, 22935, 27007, 30109, 30420, 33409,
    34949, 40283, 40493, 40549, 47282, 49146, 50257, 50359, 50360, 50361
]
NON_SPEECH_TOKENS_MULTI = [
    1, 2, 7, 8, 9, 10, 14, 25,
    26, 27, 28, 29, 31, 58, 59, 60, 61, 62,
    63, 90, 91, 92, 93, 359, 503, 522, 542, 873,
    893, 902, 918, 922, 931, 1350, 1853, 1982, 2460, 2627,
    3246, 3253, 3268, 3536, 3846, 3961, 4183, 4667, 6585, 6647,
    7273, 9061, 9383, 10428, 10929, 11938, 12033, 12331, 12562, 13793,
    14157, 14635, 15265, 15618, 16553, 16604, 18362, 18956, 20075, 21675,
    22520, 26130, 26161, 26435, 28279, 29464, 31650, 32302, 32470, 36865,
    42863, 47425, 49870, 50254, 50258, 50360, 50361, 50362
]
# fmt: on


class WhisperConfig(PretrainedConfig):
    """Configuration class to store the configuration of a Whisper model."""

    model_type = "whisper"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=51865,
        num_mel_bins=80,
        encoder_layers=6,
        encoder_attention_heads=4,
        decoder_layers=6,
        decoder_attention_heads=4,
        decoder_ffn_dim=1536,
        encoder_ffn_dim=1536,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        decoder_start_token_id=50257,
        use_cache=True,
        is_encoder_decoder=True,
        activation_function="gelu",
        d_model=256,
        dropout=0.0,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        scale_embedding=False,
        max_source_positions=1500,
        max_target_positions=448,
        pad_token_id=50256,
        bos_token_id=50256,
        eos_token_id=50256,
        suppress_tokens=None,
        begin_suppress_tokens=[220, 50256],
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        apply_spec_augment=False,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        median_filter_width=7,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.num_mel_bins = num_mel_bins
        self.d_model = d_model
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_ffn_dim = encoder_ffn_dim
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.max_source_positions = max_source_positions
        self.max_target_positions = max_target_positions

        # Audio Classification-specific parameters. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size
        self.use_weighted_layer_sum = use_weighted_layer_sum

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        self.median_filter_width = median_filter_width

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            suppress_tokens=suppress_tokens,
            begin_suppress_tokens=begin_suppress_tokens,
            **kwargs,
        )


class WhisperOnnxConfig(OnnxSeq2SeqConfigWithPast):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict(
            [
                ("input_features", {0: "batch", 1: "feature_size", 2: "encoder_sequence"}),
            ]
        )
        if self.use_past:
            common_inputs["decoder_input_ids"] = {0: "batch"}
        else:
            common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}

        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")

        return common_inputs

    def generate_dummy_inputs(
        self,
        preprocessor: Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"],
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional["TensorType"] = None,
        sampling_rate: int = 22050,
        time_duration: float = 5.0,
        frequency: int = 220,
    ) -> Mapping[str, Any]:
        dummy_inputs = OrderedDict()
        encoder_inputs = OnnxConfig.generate_dummy_inputs(
            self,
            preprocessor=preprocessor.feature_extractor,
            batch_size=batch_size,
            framework=framework,
            sampling_rate=sampling_rate,
            time_duration=time_duration,
            frequency=frequency,
        )
        encoder_sequence_length = encoder_inputs["input_features"].shape[2]
        seq_length = encoder_sequence_length // 2 if self.use_past else seq_length

        decoder_inputs = super().generate_dummy_inputs(
            preprocessor.tokenizer, batch_size, seq_length, is_pair, framework
        )

        dummy_inputs["input_features"] = encoder_inputs.pop("input_features")
        dummy_inputs["decoder_input_ids"] = decoder_inputs.pop("decoder_input_ids")

        if "past_key_values" in decoder_inputs:
            dummy_inputs["past_key_values"] = decoder_inputs.pop("past_key_values")

        return dummy_inputs

    @property
    def atol_for_validation(self) -> float:
        return 1e-3
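# Illustrative sketch (not from the original module): constructing the config above and
# round-tripping it with the standard PretrainedConfig serialization API. The override
# values are arbitrary toy sizes.
config = WhisperConfig(d_model=128, encoder_layers=2, decoder_layers=2)
print(config.model_type, config.hidden_size)  # "whisper", 128 (attribute_map aliases hidden_size to d_model)

config.save_pretrained("./whisper-tiny-config")  # writes config.json
reloaded = WhisperConfig.from_pretrained("./whisper-tiny-config")
assert reloaded.to_dict() == config.to_dict()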
from typing import Dict

from .base import GenericTensor, Pipeline


class FeatureExtractionPipeline(Pipeline):
    def _sanitize_parameters(self, truncation=None, tokenize_kwargs=None, return_tensors=None, **kwargs):
        if tokenize_kwargs is None:
            tokenize_kwargs = {}

        if truncation is not None:
            if "truncation" in tokenize_kwargs:
                raise ValueError(
                    "truncation parameter defined twice (given as keyword argument as well as in tokenize_kwargs)"
                )
            tokenize_kwargs["truncation"] = truncation

        preprocess_params = tokenize_kwargs

        postprocess_params = {}
        if return_tensors is not None:
            postprocess_params["return_tensors"] = return_tensors

        return preprocess_params, {}, postprocess_params

    def preprocess(self, inputs, **tokenize_kwargs) -> Dict[str, GenericTensor]:
        return_tensors = self.framework
        model_inputs = self.tokenizer(inputs, return_tensors=return_tensors, **tokenize_kwargs)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs, return_tensors=False):
        # [0] is the first available tensor, logits or last_hidden_state.
        if return_tensors:
            return model_outputs[0]
        if self.framework == "pt":
            return model_outputs[0].tolist()
        elif self.framework == "tf":
            return model_outputs[0].numpy().tolist()

    def __call__(self, *args, **kwargs):
        return super().__call__(*args, **kwargs)
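# Illustrative sketch (the model name is an assumption; any small encoder checkpoint
# works): the class above is normally reached through the "feature-extraction" task of
# transformers.pipeline and returns nested lists of hidden states.
from transformers import pipeline

extractor = pipeline("feature-extraction", model="distilbert-base-uncased")
features = extractor("This is a simple test.")
print(len(features[0]), len(features[0][0]))  # number of tokens x hidden size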
import os
import shutil
import tempfile
import unittest

import numpy as np

from transformers import AutoTokenizer, BarkProcessor
from transformers.testing_utils import require_torch, slow


@require_torch
class BarkProcessorTest(unittest.TestCase):
    def setUp(self):
        self.checkpoint = "ylacombe/bark-small"
        self.tmpdirname = tempfile.mkdtemp()
        self.voice_preset = "en_speaker_1"
        self.input_string = "This is a test string"
        self.speaker_embeddings_dict_path = "speaker_embeddings_path.json"
        self.speaker_embeddings_directory = "speaker_embeddings"

    def get_tokenizer(self, **kwargs):
        return AutoTokenizer.from_pretrained(self.checkpoint, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def test_save_load_pretrained_default(self):
        tokenizer = self.get_tokenizer()
        processor = BarkProcessor(tokenizer=tokenizer)

        processor.save_pretrained(self.tmpdirname)
        processor = BarkProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab())

    @slow
    def test_save_load_pretrained_additional_features(self):
        processor = BarkProcessor.from_pretrained(
            pretrained_processor_name_or_path=self.checkpoint,
            speaker_embeddings_dict_path=self.speaker_embeddings_dict_path,
        )
        processor.save_pretrained(
            self.tmpdirname,
            speaker_embeddings_dict_path=self.speaker_embeddings_dict_path,
            speaker_embeddings_directory=self.speaker_embeddings_directory,
        )

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")

        processor = BarkProcessor.from_pretrained(
            self.tmpdirname,
            self.speaker_embeddings_dict_path,
            bos_token="(BOS)",
            eos_token="(EOS)",
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())

    def test_speaker_embeddings(self):
        processor = BarkProcessor.from_pretrained(
            pretrained_processor_name_or_path=self.checkpoint,
            speaker_embeddings_dict_path=self.speaker_embeddings_dict_path,
        )

        seq_len = 35
        nb_codebooks_coarse = 2
        nb_codebooks_total = 8

        voice_preset = {
            "semantic_prompt": np.ones(seq_len),
            "coarse_prompt": np.ones((nb_codebooks_coarse, seq_len)),
            "fine_prompt": np.ones((nb_codebooks_total, seq_len)),
        }

        # test providing already loaded voice_preset
        inputs = processor(text=self.input_string, voice_preset=voice_preset)

        processed_voice_preset = inputs["history_prompt"]
        for key in voice_preset:
            self.assertListEqual(voice_preset[key].tolist(), processed_voice_preset.get(key, np.array([])).tolist())

        # test loading voice preset from npz file
        tmpfilename = os.path.join(self.tmpdirname, "file.npz")
        np.savez(tmpfilename, **voice_preset)
        inputs = processor(text=self.input_string, voice_preset=tmpfilename)
        processed_voice_preset = inputs["history_prompt"]

        for key in voice_preset:
            self.assertListEqual(voice_preset[key].tolist(), processed_voice_preset.get(key, np.array([])).tolist())

        # test loading voice preset from the hub
        inputs = processor(text=self.input_string, voice_preset=self.voice_preset)

    def test_tokenizer(self):
        tokenizer = self.get_tokenizer()
        processor = BarkProcessor(tokenizer=tokenizer)

        encoded_processor = processor(text=self.input_string)

        encoded_tok = tokenizer(
            self.input_string,
            padding="max_length",
            max_length=256,
            add_special_tokens=False,
            return_attention_mask=True,
            return_token_type_ids=False,
        )

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key].squeeze().tolist())
import os
from pathlib import Path


def load_cuda_kernels():
    from torch.utils.cpp_extension import load

    root = Path(__file__).resolve().parent.parent.parent / "kernels" / "deformable_detr"
    src_files = [
        root / filename
        for filename in [
            "vision.cpp",
            os.path.join("cpu", "ms_deform_attn_cpu.cpp"),
            os.path.join("cuda", "ms_deform_attn_cuda.cu"),
        ]
    ]

    load(
        "MultiScaleDeformableAttention",
        src_files,
        with_cuda=True,
        extra_include_paths=[str(root)],
        extra_cflags=["-DWITH_CUDA=1"],
        extra_cuda_cflags=[
            "-DCUDA_HAS_FP16=1",
            "-D__CUDA_NO_HALF_OPERATORS__",
            "-D__CUDA_NO_HALF_CONVERSIONS__",
            "-D__CUDA_NO_HALF2_OPERATORS__",
        ],
    )

    import MultiScaleDeformableAttention as MSDA

    return MSDA
from __future__ import annotations

from collections.abc import Callable


def trapezoidal_area(
    fnc: Callable[[int | float], int | float],
    x_start: int | float,
    x_end: int | float,
    steps: int = 100,
) -> float:
    """Approximate the area under the curve by summing trapezium segments."""
    x1 = x_start
    fx1 = fnc(x_start)
    area = 0.0

    for _ in range(steps):
        # Approximates small segments of curve as linear and solve
        # for trapezoidal area
        x2 = (x_end - x_start) / steps + x1
        fx2 = fnc(x2)
        area += abs(fx2 + fx1) * (x2 - x1) / 2

        # Increment step
        x1 = x2
        fx1 = fx2
    return area


if __name__ == "__main__":

    def f(x):
        return x**3 + x**2

    print("f(x) = x^3 + x^2")
    print("The area between the curve, x = -5, x = 5 and the x axis is:")
    i = 10
    while i <= 100000:
        print(f"with {i} steps: {trapezoidal_area(f, -5, 5, i)}")
        i *= 10
import unittest

from transformers import EsmConfig, is_torch_available
from transformers.testing_utils import TestCasePlus, require_torch, slow, torch_device

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import EsmForMaskedLM, EsmForSequenceClassification, EsmForTokenClassification, EsmModel
    from transformers.models.esm.modeling_esm import (
        ESM_PRETRAINED_MODEL_ARCHIVE_LIST,
        EsmEmbeddings,
        create_position_ids_from_input_ids,
    )


class EsmModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=False,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=33,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return EsmConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            pad_token_id=1,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = EsmModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_for_masked_lm(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = EsmForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_token_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = EsmForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict


@require_torch
class EsmModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    test_mismatched_shapes = False

    all_model_classes = (
        (
            EsmForMaskedLM,
            EsmModel,
            EsmForSequenceClassification,
            EsmForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = ()
    pipeline_model_mapping = (
        {
            "feature-extraction": EsmModel,
            "fill-mask": EsmForMaskedLM,
            "text-classification": EsmForSequenceClassification,
            "token-classification": EsmForTokenClassification,
            "zero-shot": EsmForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_sequence_classification_problem_types = True

    def setUp(self):
        self.model_tester = EsmModelTester(self)
        self.config_tester = ConfigTester(self, config_class=EsmConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = EsmModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_create_position_ids_respects_padding_index(self):
        """The position ids should be masked with the embedding object's padding index."""
        config = self.model_tester.prepare_config_and_inputs()[0]
        model = EsmEmbeddings(config=config)

        input_ids = torch.as_tensor([[12, 31, 13, model.padding_idx]])
        expected_positions = torch.as_tensor(
            [
                [
                    0 + model.padding_idx + 1,
                    1 + model.padding_idx + 1,
                    2 + model.padding_idx + 1,
                    model.padding_idx,
                ]
            ]
        )
        position_ids = create_position_ids_from_input_ids(input_ids, model.padding_idx)
        self.assertEqual(position_ids.shape, expected_positions.shape)
        self.assertTrue(torch.all(torch.eq(position_ids, expected_positions)))

    def test_create_position_ids_from_inputs_embeds(self):
        """When inputs_embeds are passed, position ids are generated sequentially after the padding index."""
        config = self.model_tester.prepare_config_and_inputs()[0]
        embeddings = EsmEmbeddings(config=config)

        inputs_embeds = torch.empty(2, 4, 30)
        expected_single_positions = [
            0 + embeddings.padding_idx + 1,
            1 + embeddings.padding_idx + 1,
            2 + embeddings.padding_idx + 1,
            3 + embeddings.padding_idx + 1,
        ]
        expected_positions = torch.as_tensor([expected_single_positions, expected_single_positions])
        position_ids = embeddings.create_position_ids_from_inputs_embeds(inputs_embeds)
        self.assertEqual(position_ids.shape, expected_positions.shape)
        self.assertTrue(torch.all(torch.eq(position_ids, expected_positions)))

    @unittest.skip("Esm does not support embedding resizing")
    def test_resize_embeddings_untied(self):
        pass

    @unittest.skip("Esm does not support embedding resizing")
    def test_resize_tokens_embeddings(self):
        pass

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass


@require_torch
class EsmModelIntegrationTest(TestCasePlus):
    @slow
    def test_inference_masked_lm(self):
        with torch.no_grad():
            model = EsmForMaskedLM.from_pretrained("facebook/esm2_t6_8M_UR50D")
            model.eval()
            input_ids = torch.tensor([[0, 1, 2, 3, 4, 5]])
            output = model(input_ids)[0]

            vocab_size = 33

            expected_shape = torch.Size((1, 6, vocab_size))
            self.assertEqual(output.shape, expected_shape)

            expected_slice = torch.tensor(
                [[[8.9215, -10.5898, -6.4671], [-6.3967, -13.9114, -1.1212], [-7.7812, -13.9516, -3.7406]]]
            )
            self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_no_head(self):
        with torch.no_grad():
            model = EsmModel.from_pretrained("facebook/esm2_t6_8M_UR50D")
            model.eval()

            input_ids = torch.tensor([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]])
            output = model(input_ids)[0]
            # compare the actual values for a slice.
            expected_slice = torch.tensor(
                [[[0.1444, 0.5413, 0.3248], [0.3034, 0.0053, 0.3108], [0.3228, -0.2499, 0.3415]]]
            )
            self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
import time
import warnings
from abc import ABC
from copy import deepcopy
from typing import Optional

import torch

from ..utils import add_start_docstrings, logging


logger = logging.get_logger(__name__)


STOPPING_CRITERIA_INPUTS_DOCSTRING = r"""
    Args:
        input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
            Indices of input sequence tokens in the vocabulary.

            Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
            [`PreTrainedTokenizer.__call__`] for details.

            [What are input IDs?](../glossary#input-ids)
        scores (`torch.FloatTensor` of shape `(batch_size, config.vocab_size)`):
            Prediction scores of a language modeling head. These can be scores for each vocabulary token before SoftMax
            or scores for each vocabulary token after SoftMax.
        kwargs (`Dict[str, Any]`, *optional*):
            Additional stopping criteria specific kwargs.

    Return:
        `bool`. `False` indicates we should continue, `True` indicates we should stop.

"""


class StoppingCriteria(ABC):
    """Abstract base class for all stopping criteria that can be applied during generation."""

    @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING)
    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
        raise NotImplementedError("StoppingCriteria needs to be subclassed")


class MaxLengthCriteria(StoppingCriteria):
    """Stops generation once the full sequence reaches `max_length` tokens."""

    def __init__(self, max_length: int, max_position_embeddings: Optional[int] = None):
        self.max_length = max_length
        self.max_position_embeddings = max_position_embeddings

    @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING)
    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
        cur_len = input_ids.shape[-1]
        is_done = cur_len >= self.max_length
        if self.max_position_embeddings is not None and not is_done and cur_len >= self.max_position_embeddings:
            logger.warning_once(
                "This is a friendly reminder - the current text generation call will exceed the model's predefined "
                f"maximum length ({self.max_position_embeddings}). Depending on the model, you may observe "
                "exceptions, performance degradation, or nothing at all."
            )
        return is_done


class MaxNewTokensCriteria(StoppingCriteria):
    """Deprecated: stops generation after `max_new_tokens` tokens past `start_length`."""

    def __init__(self, start_length: int, max_new_tokens: int):
        warnings.warn(
            "The class `MaxNewTokensCriteria` is deprecated. "
            f"Please use `MaxLengthCriteria(max_length={start_length + max_new_tokens})` "
            "with `max_length = start_length + max_new_tokens` instead.",
            FutureWarning,
        )
        self.start_length = start_length
        self.max_new_tokens = max_new_tokens
        self.max_length = start_length + max_new_tokens

    @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING)
    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
        return input_ids.shape[-1] >= self.max_length


class MaxTimeCriteria(StoppingCriteria):
    """Stops generation once more than `max_time` seconds have elapsed."""

    def __init__(self, max_time: float, initial_timestamp: Optional[float] = None):
        self.max_time = max_time
        self.initial_timestamp = time.time() if initial_timestamp is None else initial_timestamp

    @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING)
    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
        return time.time() - self.initial_timestamp > self.max_time


class StoppingCriteriaList(list):
    @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING)
    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
        return any(criteria(input_ids, scores) for criteria in self)

    @property
    def max_length(self) -> Optional[int]:
        for stopping_criterium in self:
            if isinstance(stopping_criterium, MaxLengthCriteria):
                return stopping_criterium.max_length
            elif isinstance(stopping_criterium, MaxNewTokensCriteria):
                return stopping_criterium.max_length
        return None


def validate_stopping_criteria(stopping_criteria: StoppingCriteriaList, max_length: int) -> StoppingCriteriaList:
    stopping_max_length = stopping_criteria.max_length
    new_stopping_criteria = deepcopy(stopping_criteria)
    if stopping_max_length is not None and stopping_max_length != max_length:
        warnings.warn("You set different `max_length` for stopping criteria and `max_length` parameter", UserWarning)
    elif stopping_max_length is None:
        new_stopping_criteria.append(MaxLengthCriteria(max_length=max_length))
    return new_stopping_criteria
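# Illustrative sketch (the checkpoint choice is an assumption): composing the criteria
# above into a StoppingCriteriaList and handing it to generate(), which is how they are
# consumed in practice.
from transformers import AutoModelForCausalLM, AutoTokenizer

tok = AutoTokenizer.from_pretrained("gpt2")
model = AutoModelForCausalLM.from_pretrained("gpt2")

criteria = StoppingCriteriaList([MaxLengthCriteria(max_length=20), MaxTimeCriteria(max_time=5.0)])
inputs = tok("The quick brown fox", return_tensors="pt")
output = model.generate(**inputs, stopping_criteria=criteria)
print(tok.decode(output[0]))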
'''simple docstring'''
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, MBartTokenizer, MBartTokenizerFast, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
A_ = get_tests_dir("fixtures/test_sentencepiece.model")
if is_torch_available():
from transformers.models.mbart.modeling_mbart import shift_tokens_right
A_ = 250_004
A_ = 250_020
@require_sentencepiece
@require_tokenizers
class UpperCAmelCase ( UpperCAmelCase__ , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ = MBartTokenizer
SCREAMING_SNAKE_CASE_ = MBartTokenizerFast
SCREAMING_SNAKE_CASE_ = True
SCREAMING_SNAKE_CASE_ = True
def UpperCamelCase( self ) -> Any:
'''simple docstring'''
super().setUp()
# We have a SentencePiece fixture for testing
lowerCamelCase_ = MBartTokenizer(SCREAMING_SNAKE_CASE_ , keep_accents=SCREAMING_SNAKE_CASE_ )
tokenizer.save_pretrained(self.tmpdirname )
def UpperCamelCase( self ) -> Dict:
'''simple docstring'''
lowerCamelCase_ = MBartTokenizer(SCREAMING_SNAKE_CASE_ , keep_accents=SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = tokenizer.tokenize('This is a test' )
self.assertListEqual(SCREAMING_SNAKE_CASE_ , ['▁This', '▁is', '▁a', '▁t', 'est'] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE_ ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
lowerCamelCase_ = tokenizer.tokenize('I was born in 92000, and this is falsé.' )
self.assertListEqual(
SCREAMING_SNAKE_CASE_ , [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'9',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'é',
'.',
] , )
lowerCamelCase_ = tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE_ )
self.assertListEqual(
SCREAMING_SNAKE_CASE_ , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
# ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^
] , )
lowerCamelCase_ = tokenizer.convert_ids_to_tokens(SCREAMING_SNAKE_CASE_ )
self.assertListEqual(
SCREAMING_SNAKE_CASE_ , [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'<unk>',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'<unk>',
'.',
] , )
def UpperCamelCase( self ) -> int:
'''simple docstring'''
if not self.test_slow_tokenizer:
# as we don't have a slow version, we can't compare the outputs between slow and fast versions
return
lowerCamelCase_ = (self.rust_tokenizer_class, 'hf-internal-testing/tiny-random-mbart', {})
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
lowerCamelCase_ = self.rust_tokenizer_class.from_pretrained(SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = self.tokenizer_class.from_pretrained(SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = tempfile.mkdtemp()
lowerCamelCase_ = tokenizer_r.save_pretrained(SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = tokenizer_p.save_pretrained(SCREAMING_SNAKE_CASE_ )
# Checks it save with the same files + the tokenizer.json file for the fast one
self.assertTrue(any('tokenizer.json' in f for f in tokenizer_r_files ) )
lowerCamelCase_ = tuple(f for f in tokenizer_r_files if 'tokenizer.json' not in f )
self.assertSequenceEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# Checks everything loads correctly in the same way
lowerCamelCase_ = tokenizer_r.from_pretrained(SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = tokenizer_p.from_pretrained(SCREAMING_SNAKE_CASE_ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) )
# self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
# self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))
shutil.rmtree(SCREAMING_SNAKE_CASE_ )
# Save tokenizer rust, legacy_format=True
lowerCamelCase_ = tempfile.mkdtemp()
lowerCamelCase_ = tokenizer_r.save_pretrained(SCREAMING_SNAKE_CASE_ , legacy_format=SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = tokenizer_p.save_pretrained(SCREAMING_SNAKE_CASE_ )
# Checks it save with the same files
self.assertSequenceEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# Checks everything loads correctly in the same way
lowerCamelCase_ = tokenizer_r.from_pretrained(SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = tokenizer_p.from_pretrained(SCREAMING_SNAKE_CASE_ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) )
shutil.rmtree(SCREAMING_SNAKE_CASE_ )
# Save tokenizer rust, legacy_format=False
lowerCamelCase_ = tempfile.mkdtemp()
lowerCamelCase_ = tokenizer_r.save_pretrained(SCREAMING_SNAKE_CASE_ , legacy_format=SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = tokenizer_p.save_pretrained(SCREAMING_SNAKE_CASE_ )
# Checks it saved the tokenizer.json file
self.assertTrue(any('tokenizer.json' in f for f in tokenizer_r_files ) )
# Checks everything loads correctly in the same way
lowerCamelCase_ = tokenizer_r.from_pretrained(SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = tokenizer_p.from_pretrained(SCREAMING_SNAKE_CASE_ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) )
shutil.rmtree(SCREAMING_SNAKE_CASE_ )
@require_torch
@require_sentencepiece
@require_tokenizers
class UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ = 'facebook/mbart-large-en-ro'
SCREAMING_SNAKE_CASE_ = [
' UN Chief Says There Is No Military Solution in Syria',
' Secretary-General Ban Ki-moon says his response to Russia\'s stepped up military support for Syria is that "there is no military solution" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.',
]
SCREAMING_SNAKE_CASE_ = [
'Şeful ONU declară că nu există o soluţie militară în Siria',
'Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei'
' pentru Siria este că "nu există o soluţie militară" la conflictul de aproape cinci ani şi că noi arme nu vor'
' face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.',
]
    expected_src_tokens = [8274, 127873, 25916, 7, 8622, 2071, 438, 67485, 53, 187895, 23, 51712, 2, EN_CODE]

    @classmethod
    def setUpClass(cls):
        cls.tokenizer: MBartTokenizer = MBartTokenizer.from_pretrained(
            cls.checkpoint_name, src_lang="en_XX", tgt_lang="ro_RO"
        )
        cls.pad_token_id = 1
        return cls

    def test_language_codes(self):
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["ar_AR"], 250001)
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["en_EN"], 250004)
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["ro_RO"], 250020)

    def test_enro_tokenizer_batch_encode_plus(self):
        ids = self.tokenizer.batch_encode_plus(self.src_text).input_ids[0]
        self.assertListEqual(self.expected_src_tokens, ids)

    def test_enro_tokenizer_decode_ignores_language_codes(self):
        self.assertIn(RO_CODE, self.tokenizer.all_special_ids)
        generated_ids = [RO_CODE, 884, 9019, 96, 9, 916, 86792, 36, 18743, 15596, 5, 2]
        result = self.tokenizer.decode(generated_ids, skip_special_tokens=True)
        expected_romanian = self.tokenizer.decode(generated_ids[1:], skip_special_tokens=True)
        self.assertEqual(result, expected_romanian)
        self.assertNotIn(self.tokenizer.eos_token, result)

    def test_enro_tokenizer_truncation(self):
        src_text = ["this is gunna be a long sentence " * 20]
        assert isinstance(src_text[0], str)
        desired_max_length = 10
        ids = self.tokenizer(src_text, max_length=desired_max_length, truncation=True).input_ids[0]
        self.assertEqual(ids[-2], 2)
        self.assertEqual(ids[-1], EN_CODE)
        self.assertEqual(len(ids), desired_max_length)

    def test_mask_token(self):
        self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["<mask>", "ar_AR"]), [250026, 250001])

    def test_special_tokens_unaffacted_by_save_load(self):
        tmpdirname = tempfile.mkdtemp()
        original_special_tokens = self.tokenizer.fairseq_tokens_to_ids
        self.tokenizer.save_pretrained(tmpdirname)
        new_tok = MBartTokenizer.from_pretrained(tmpdirname)
        self.assertDictEqual(new_tok.fairseq_tokens_to_ids, original_special_tokens)

    @require_torch
    def test_batch_fairseq_parity(self):
        batch = self.tokenizer(self.src_text, text_target=self.tgt_text, padding=True, return_tensors="pt")
        batch["decoder_input_ids"] = shift_tokens_right(batch["labels"], self.tokenizer.pad_token_id)

        # fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
        assert batch.input_ids[1][-2:].tolist() == [2, EN_CODE]
        assert batch.decoder_input_ids[1][0].tolist() == RO_CODE
        assert batch.decoder_input_ids[1][-1] == 2
        assert batch.labels[1][-2:].tolist() == [2, RO_CODE]

    @require_torch
    def test_enro_tokenizer_prepare_batch(self):
        batch = self.tokenizer(
            self.src_text,
            text_target=self.tgt_text,
            padding=True,
            truncation=True,
            max_length=len(self.expected_src_tokens),
            return_tensors="pt",
        )
        batch["decoder_input_ids"] = shift_tokens_right(batch["labels"], self.tokenizer.pad_token_id)

        self.assertIsInstance(batch, BatchEncoding)
        self.assertEqual((2, 14), batch.input_ids.shape)
        self.assertEqual((2, 14), batch.attention_mask.shape)
        result = batch.input_ids.tolist()[0]
        self.assertListEqual(self.expected_src_tokens, result)
        self.assertEqual(2, batch.decoder_input_ids[0, -1])  # EOS
        # Test that special tokens are reset
        self.assertEqual(self.tokenizer.prefix_tokens, [])
        self.assertEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id, EN_CODE])

    def test_seq2seq_max_length(self):
        batch = self.tokenizer(self.src_text, padding=True, truncation=True, max_length=3, return_tensors="pt")
        targets = self.tokenizer(
            text_target=self.tgt_text, padding=True, truncation=True, max_length=10, return_tensors="pt"
        )
        labels = targets["input_ids"]
        batch["decoder_input_ids"] = shift_tokens_right(labels, self.tokenizer.pad_token_id)

        self.assertEqual(batch.input_ids.shape[1], 3)
        self.assertEqual(batch.decoder_input_ids.shape[1], 10)

    @require_torch
    def test_tokenizer_translation(self):
        inputs = self.tokenizer._build_translation_inputs(
            "A test", return_tensors="pt", src_lang="en_XX", tgt_lang="ar_AR"
        )

        self.assertEqual(
            nested_simplify(inputs),
            {
                # A, test, EOS, en_XX
                "input_ids": [[62, 3034, 2, 250004]],
                "attention_mask": [[1, 1, 1, 1]],
                # ar_AR
                "forced_bos_token_id": 250001,
            },
        )
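
# --- Illustrative sketch (not part of the test suite) ---
# MBART's source-side encoding ends with the [eos, src_lang_code] suffix
# that several tests above rely on. The checkpoint name below is an
# assumption (the en-ro model hosted on the Hub):
#
#   tok = MBartTokenizer.from_pretrained(
#       "facebook/mbart-large-en-ro", src_lang="en_XX", tgt_lang="ro_RO"
#   )
#   ids = tok("UN Chief Says There Is No Military Solution in Syria").input_ids
#   print(ids[-2:])  # [2, 250004] -> [</s>, en_XX]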
| 716
|
'''simple docstring'''
import os
from collections import deque
import torch
from torch.utils.data import Dataset
class CNNDMDataset(Dataset):
    """Abstracts a directory of CNN/DailyMail story files. The summary of each
    story appears at the end as sentences prefixed by `@highlight` lines."""

    def __init__(self, path="", prefix="train"):
        """List all the documents to summarize; files are not read into memory
        due to the size of the dataset."""
        assert os.path.isdir(path)

        self.documents = []
        story_filenames_list = os.listdir(path)
        for story_filename in story_filenames_list:
            if "summary" in story_filename:
                continue
            path_to_story = os.path.join(path, story_filename)
            if not os.path.isfile(path_to_story):
                continue
            self.documents.append(path_to_story)

    def __len__(self):
        """Returns the number of documents."""
        return len(self.documents)

    def __getitem__(self, idx):
        document_path = self.documents[idx]
        document_name = document_path.split("/")[-1]
        with open(document_path, encoding="utf-8") as source:
            raw_story = source.read()
            story_lines, summary_lines = process_story(raw_story)
        return document_name, story_lines, summary_lines
def process_story(raw_story):
    """Extract the story and summary lines from a raw story string."""
    nonempty_lines = list(filter(lambda x: len(x) != 0, [line.strip() for line in raw_story.split("\n")]))

    # for some unknown reason some lines miss a period, add it
    nonempty_lines = [_add_missing_period(line) for line in nonempty_lines]

    # gather article lines
    story_lines = []
    lines = deque(nonempty_lines)
    while True:
        try:
            element = lines.popleft()
            if element.startswith("@highlight"):
                break
            story_lines.append(element)
        except IndexError:
            # if "@highlight" is absent from the file we pop
            # all elements until there is None, raising an exception.
            return story_lines, []

    # gather summary lines
    summary_lines = list(filter(lambda t: not t.startswith("@highlight"), lines))

    return story_lines, summary_lines


def _add_missing_period(line):
    END_TOKENS = [".", "!", "?", "...", "'", "`", '"', "\u2019", "\u2019", ")"]
    if line.startswith("@highlight"):
        return line
    if line[-1] in END_TOKENS:
        return line
    return line + "."


def fit_to_block_size(sequence, block_size, pad_token_id):
    """Truncate the sequence to the block size, or pad it on the right."""
    if len(sequence) > block_size:
        return sequence[:block_size]
    else:
        sequence.extend([pad_token_id] * (block_size - len(sequence)))
        return sequence


def build_mask(sequence, pad_token_id):
    """Builds the attention mask: 1 for real tokens, 0 for padding."""
    mask = torch.ones_like(sequence)
    idx_pad_tokens = sequence == pad_token_id
    mask[idx_pad_tokens] = 0
    return mask


def encode_for_summarization(story_lines, summary_lines, tokenizer):
    """Encode the story and summary lines and flatten them into token id lists."""
    story_lines_token_ids = [tokenizer.encode(line) for line in story_lines]
    story_token_ids = [token for sentence in story_lines_token_ids for token in sentence]
    summary_lines_token_ids = [tokenizer.encode(line) for line in summary_lines]
    summary_token_ids = [token for sentence in summary_lines_token_ids for token in sentence]

    return story_token_ids, summary_token_ids


def compute_token_type_ids(batch, separator_token_id):
    """Segment embeddings: alternating 0s and 1s for consecutive sentences."""
    batch_embeddings = []
    for sequence in batch:
        sentence_num = -1
        embeddings = []
        for s in sequence:
            if s == separator_token_id:
                sentence_num += 1
            embeddings.append(sentence_num % 2)
        batch_embeddings.append(embeddings)
    return torch.tensor(batch_embeddings)
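

def _example_usage():
    # Hedged end-to-end sketch (not from the original module): the stories
    # directory below is an illustrative assumption.
    from transformers import BertTokenizer

    dataset = CNNDMDataset("/path/to/cnn/stories")
    name, story_lines, summary_lines = dataset[0]
    tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
    story_ids, summary_ids = encode_for_summarization(story_lines, summary_lines, tokenizer)
    story_ids = fit_to_block_size(story_ids, 512, tokenizer.pad_token_id)
    print(name, len(story_ids), len(summary_ids))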
| 384
| 0
|
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""andreasmadsen/efficient_mlm_m0.40""": (
"""https://huggingface.co/andreasmadsen/efficient_mlm_m0.40/resolve/main/config.json"""
),
}
class RobertaPreLayerNormConfig(PretrainedConfig):
    model_type = "roberta-prelayernorm"

    def __init__(
        self,
        vocab_size=50265,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class RobertaPreLayerNormOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
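

def _example_config():
    # Quick sanity-check sketch (illustrative, not from the original file):
    # the defaults mirror roberta-base, with pre-layer-norm inside the blocks.
    config = RobertaPreLayerNormConfig()
    print(config.model_type, config.hidden_size, config.num_hidden_layers)  # roberta-prelayernorm 768 12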
| 613
|
def ugly_numbers(n: int) -> int:
    """Returns the nth ugly number (a number whose only prime factors are 2, 3 and 5).

    >>> ugly_numbers(1)
    1
    >>> ugly_numbers(10)
    12
    """
    ugly_nums = [1]

    i2, i3, i5 = 0, 0, 0
    next_2 = ugly_nums[i2] * 2
    next_3 = ugly_nums[i3] * 3
    next_5 = ugly_nums[i5] * 5

    for _ in range(1, n):
        next_num = min(next_2, next_3, next_5)
        ugly_nums.append(next_num)
        if next_num == next_2:
            i2 += 1
            next_2 = ugly_nums[i2] * 2
        if next_num == next_3:
            i3 += 1
            next_3 = ugly_nums[i3] * 3
        if next_num == next_5:
            i5 += 1
            next_5 = ugly_nums[i5] * 5
    return ugly_nums[-1]
if __name__ == "__main__":
from doctest import testmod
testmod(verbose=True)
print(f'''{ugly_numbers(2_0_0) = }''')
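

def ugly_numbers_heap(n: int) -> int:
    # Equivalent min-heap formulation (an alternative sketch, not from the
    # original file): pop the smallest candidate n times, pushing its 2x/3x/5x
    # multiples while deduplicating with a set. Agrees with ugly_numbers(n).
    import heapq

    heap, seen, num = [1], {1}, 1
    for _ in range(n):
        num = heapq.heappop(heap)
        for factor in (2, 3, 5):
            if num * factor not in seen:
                seen.add(num * factor)
                heapq.heappush(heap, num * factor)
    return num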
| 613
| 1
|
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

SEW_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''asapp/sew-tiny-100k''': '''https://huggingface.co/asapp/sew-tiny-100k/resolve/main/config.json''',
# See all SEW models at https://huggingface.co/models?filter=sew
}
class SEWConfig(PretrainedConfig):
    model_type = "sew"

    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        squeeze_factor=2,
        hidden_act="gelu",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        final_dropout=0.1,
        layerdrop=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        feat_extract_norm="group",
        feat_extract_activation="gelu",
        conv_dim=(64, 128, 128, 128, 128, 256, 256, 256, 256, 512, 512, 512, 512),
        conv_stride=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1),
        conv_kernel=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1),
        conv_bias=False,
        num_conv_pos_embeddings=128,
        num_conv_pos_embedding_groups=16,
        apply_spec_augment=True,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        ctc_loss_reduction="mean",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.squeeze_factor = squeeze_factor
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size

        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect."
                "It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`,"
                f"but is `len(config.conv_dim) = {len(self.conv_dim)}`, `len(config.conv_stride)"
                f"= {len(self.conv_stride)}`, `len(config.conv_kernel) = {len(self.conv_kernel)}`."
            )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # sequence classification
        self.use_weighted_layer_sum = use_weighted_layer_sum
        self.classifier_proj_size = classifier_proj_size

    @property
    def inputs_to_logits_ratio(self):
        return functools.reduce(operator.mul, self.conv_stride, 1)
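

def _example_ratio():
    # Sketch (not from the original file): with the default conv strides
    # (5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1) the feature encoder downsamples
    # raw audio by 5 * 2**6 = 320 samples per output frame.
    config = SEWConfig()
    print(config.inputs_to_logits_ratio)  # 320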
| 706
|
import argparse
import fairseq
import torch
from torch import nn
from transformers import (
    MBart50Tokenizer,
    MBartConfig,
    MBartForCausalLM,
    SpeechEncoderDecoderConfig,
    SpeechEncoderDecoderModel,
    Wav2Vec2Config,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2Model,
    logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
MAPPING = {
"post_extract_proj": "feature_projection.projection",
"encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
"self_attn.k_proj": "encoder.layers.*.attention.k_proj",
"self_attn.v_proj": "encoder.layers.*.attention.v_proj",
"self_attn.q_proj": "encoder.layers.*.attention.q_proj",
"self_attn.out_proj": "encoder.layers.*.attention.out_proj",
"self_attn_layer_norm": "encoder.layers.*.layer_norm",
"fc1": "encoder.layers.*.feed_forward.intermediate_dense",
"fc2": "encoder.layers.*.feed_forward.output_dense",
"final_layer_norm": "encoder.layers.*.final_layer_norm",
"encoder.layer_norm": "encoder.layer_norm",
"w2v_model.layer_norm": "feature_projection.layer_norm",
"quantizer.weight_proj": "quantizer.weight_proj",
"quantizer.vars": "quantizer.codevectors",
"project_q": "project_q",
"final_proj": "project_hid",
"w2v_encoder.proj": "lm_head",
"mask_emb": "masked_spec_embed",
}
TOP_LEVEL_KEYS = [
"lm_head",
"quantizer.weight_proj",
"quantizer.codevectors",
"project_q",
"project_hid",
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    assert hf_shape == value.shape, (
        f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
        f" {value.shape} for {full_name}"
    )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")
def recursively_load_weights_wav2vec2(fairseq_model, hf_model):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()

    feature_extractor = hf_model.feature_extractor
    adapter = hf_model.adapter

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        elif any(x in name for x in ["adaptor", "w2v_encoder.proj.", "w2v_proj_ln."]):
            load_adapter(name, value, adapter, unused_weights)
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        weight_type = "weight"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
def load_adapter(full_name, value, adapter, unused_weights):
    name = full_name.split("adaptor.")[-1]
    items = name.split(".")

    if items[1].isdigit():
        layer_id = int(items[1])
    else:
        layer_id = None

    if "adaptor" not in full_name:
        if "proj_ln" in full_name:
            # has to be layer norm
            if "bias" in name:
                assert (
                    value.shape == adapter.proj_layer_norm.bias.data.shape
                ), f"{full_name} has size {value.shape}, but {adapter.proj_layer_norm.bias.data.shape} was found."
                adapter.proj_layer_norm.bias.data = value
                logger.info(f"Adapter proj layer norm bias was initialized from {full_name}.")
            if "weight" in name:
                assert (
                    value.shape == adapter.proj_layer_norm.weight.data.shape
                ), f"{full_name} has size {value.shape}, but {adapter.proj_layer_norm.weight.data.shape} was found."
                adapter.proj_layer_norm.weight.data = value
        else:
            # has to be projection layer
            if "bias" in name:
                assert (
                    value.shape == adapter.proj.bias.data.shape
                ), f"{full_name} has size {value.shape}, but {adapter.proj.bias.data.shape} was found."
                adapter.proj.bias.data = value
                logger.info(f"Adapter proj layer bias was initialized from {full_name}.")
            if "weight" in name:
                assert (
                    value.shape == adapter.proj.weight.data.shape
                ), f"{full_name} has size {value.shape}, but {adapter.proj.weight.data.shape} was found."
                adapter.proj.weight.data = value
                logger.info(f"Adapter proj layer weight was initialized from {full_name}.")
    elif isinstance(layer_id, int):
        if "bias" in name:
            assert (
                value.shape == adapter.layers[layer_id].conv.bias.data.shape
            ), f"{full_name} has size {value.shape}, but {adapter.layers[layer_id].conv.bias.data.shape} was found."
            adapter.layers[layer_id].conv.bias.data = value
            logger.info(f"Adapter layer {layer_id} bias was initialized from {full_name}.")
        elif "weight" in name:
            assert (
                value.shape == adapter.layers[layer_id].conv.weight.data.shape
            ), f"{full_name} has size {value.shape}, but {adapter.layers[layer_id].conv.weight.data.shape} was found."
            adapter.layers[layer_id].conv.weight.data = value
            logger.info(f"Adapter layer {layer_id} weight was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
@torch.no_grad()
def convert_wav2vec2_checkpoint(
    checkpoint_path,
    pytorch_dump_folder_path,
    dict_path,
    config_yaml_path,
    encoder_config_path,
    decoder_config_path,
    add_adapter,
    adapter_kernel_size,
    adapter_stride,
    decoder_start_token_id,
    encoder_output_dim,
):
    """Copy/paste/tweak the fairseq checkpoint weights to the transformers design."""
    # load configs
    encoder_config = Wav2Vec2Config.from_pretrained(
        encoder_config_path,
        add_adapter=add_adapter,
        adapter_stride=adapter_stride,
        adapter_kernel_size=adapter_kernel_size,
        use_auth_token=True,
        output_hidden_size=encoder_output_dim,
    )
    decoder_config = MBartConfig.from_pretrained(decoder_config_path)

    # load model
    model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
        [checkpoint_path],
        arg_overrides={
            "config_yaml": config_yaml_path,
            "data": "/".join(dict_path.split("/")[:-1]),
            "w2v_path": checkpoint_path,
            "load_pretrained_decoder_from": None,
        },
    )
    model = model[0].eval()

    # load feature extractor
    feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(encoder_config_path, use_auth_token=True)

    # set weights for wav2vec2 encoder
    hf_encoder = Wav2Vec2Model(encoder_config)
    recursively_load_weights_wav2vec2(model.encoder, hf_encoder)

    # load decoder weights
    hf_decoder = MBartForCausalLM(decoder_config)
    missing_keys, unexpected_keys = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict(), strict=False)
    logger.warning(f"The following keys are missing when loading the decoder weights: {missing_keys}")
    logger.warning(f"The following keys are unexpected when loading the decoder weights: {unexpected_keys}")

    hf_wav2vec = SpeechEncoderDecoderModel(encoder=hf_encoder, decoder=hf_decoder)
    hf_wav2vec.config.tie_word_embeddings = False

    tokenizer = MBart50Tokenizer(dict_path)
    tokenizer.save_pretrained(pytorch_dump_folder_path)

    config = hf_wav2vec.config.to_dict()
    config["pad_token_id"] = tokenizer.pad_token_id
    config["bos_token_id"] = tokenizer.bos_token_id
    config["eos_token_id"] = tokenizer.eos_token_id
    config["tokenizer_class"] = "mbart50"
    config["feature_extractor_type"] = "wav2vec2"
    config["decoder_start_token_id"] = tokenizer.eos_token_id
    config["forced_bos_token_id"] = 250004
    config["forced_eos_token_id"] = tokenizer.eos_token_id

    hf_wav2vec.config = SpeechEncoderDecoderConfig.from_dict(config)
    hf_wav2vec.save_pretrained(pytorch_dump_folder_path)
    feature_extractor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
parser.add_argument("--config_yaml_path", default=None, type=str, help="Path to yaml file of fine-tuned model")
parser.add_argument(
"--encoder_config_path",
default="facebook/wav2vec2-xls-r-1b",
type=str,
help="Path to hf encoder wav2vec2 checkpoint config",
)
parser.add_argument(
"--decoder_config_path",
default="facebook/mbart-large-50-one-to-many-mmt",
type=str,
help="Path to hf decoder checkpoint config",
)
parser.add_argument("--add_adapter", default=True, type=bool, help="whethere to add model adapter layers")
parser.add_argument("--adapter_stride", default=2, type=int, help="stride of adapter layers")
parser.add_argument("--adapter_kernel_size", default=3, type=int, help="kernel size of adapter layers")
parser.add_argument("--encoder_output_dim", default=1024, type=int, help="encoder output dim")
parser.add_argument("--start_token_id", default=250004, type=int, help="`decoder_start_token_id` of model config")
    args = parser.parse_args()
    convert_wav2vec2_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.dict_path,
args.config_yaml_path,
encoder_config_path=args.encoder_config_path,
decoder_config_path=args.decoder_config_path,
add_adapter=args.add_adapter,
adapter_kernel_size=args.adapter_kernel_size,
adapter_stride=args.adapter_stride,
decoder_start_token_id=args.start_token_id,
encoder_output_dim=args.encoder_output_dim,
)
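
    # After conversion, the dump folder can be reloaded directly (hedged
    # sketch, not part of the original script; the path is whatever was
    # passed as --pytorch_dump_folder_path):
    #   from transformers import SpeechEncoderDecoderModel, Wav2Vec2FeatureExtractor
    #   model = SpeechEncoderDecoderModel.from_pretrained(args.pytorch_dump_folder_path)
    #   feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(args.pytorch_dump_folder_path)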
| 588
| 0
|
from __future__ import annotations

from typing import Any


class Matrix:
    def __init__(self, row: int, column: int, default_value: float = 0) -> None:
        self.row, self.column = row, column
        self.array = [[default_value for _ in range(column)] for _ in range(row)]

    def __str__(self) -> str:
        s = f"Matrix consist of {self.row} rows and {self.column} columns\n"

        # Make string identifier
        max_element_length = 0
        for row_vector in self.array:
            for obj in row_vector:
                max_element_length = max(max_element_length, len(str(obj)))
        string_format_identifier = f"%{max_element_length}s"

        # Make string and return
        def single_line(row_vector: list[float]) -> str:
            nonlocal string_format_identifier
            line = "["
            line += ", ".join(string_format_identifier % (obj,) for obj in row_vector)
            line += "]"
            return line

        s += "\n".join(single_line(row_vector) for row_vector in self.array)
        return s

    def __repr__(self) -> str:
        return str(self)

    def validate_indicies(self, loc: tuple[int, int]) -> bool:
        if not (isinstance(loc, (list, tuple)) and len(loc) == 2):
            return False
        elif not (0 <= loc[0] < self.row and 0 <= loc[1] < self.column):
            return False
        else:
            return True

    def __getitem__(self, loc: tuple[int, int]) -> Any:
        assert self.validate_indicies(loc)
        return self.array[loc[0]][loc[1]]

    def __setitem__(self, loc: tuple[int, int], value: float) -> None:
        assert self.validate_indicies(loc)
        self.array[loc[0]][loc[1]] = value

    def __add__(self, another: Matrix) -> Matrix:
        assert isinstance(another, Matrix)
        assert self.row == another.row and self.column == another.column

        # Add
        result = Matrix(self.row, self.column)
        for r in range(self.row):
            for c in range(self.column):
                result[r, c] = self[r, c] + another[r, c]
        return result

    def __neg__(self) -> Matrix:
        result = Matrix(self.row, self.column)
        for r in range(self.row):
            for c in range(self.column):
                result[r, c] = -self[r, c]
        return result

    def __sub__(self, another: Matrix) -> Matrix:
        return self + (-another)

    def __mul__(self, another: int | float | Matrix) -> Matrix:
        if isinstance(another, (int, float)):  # Scalar multiplication
            result = Matrix(self.row, self.column)
            for r in range(self.row):
                for c in range(self.column):
                    result[r, c] = self[r, c] * another
            return result
        elif isinstance(another, Matrix):  # Matrix multiplication
            assert self.column == another.row
            result = Matrix(self.row, another.column)
            for r in range(self.row):
                for c in range(another.column):
                    for i in range(self.column):
                        result[r, c] += self[r, i] * another[i, c]
            return result
        else:
            msg = f"Unsupported type given for another ({type(another)})"
            raise TypeError(msg)

    def transpose(self) -> Matrix:
        result = Matrix(self.column, self.row)
        for r in range(self.row):
            for c in range(self.column):
                result[c, r] = self[r, c]
        return result

    def sherman_morrison(self, u: Matrix, v: Matrix) -> Any:
        # Validation
        assert isinstance(u, Matrix) and isinstance(v, Matrix)
        assert self.row == self.column == u.row == v.row  # u, v should be column vector
        assert u.column == v.column == 1  # u, v should be column vector

        # Calculate
        v_t = v.transpose()
        numerator_factor = (v_t * self * u)[0, 0] + 1
        if numerator_factor == 0:
            return None  # It's not invertable
        return self - ((self * u) * (v_t * self) * (1.0 / numerator_factor))


# Testing
if __name__ == "__main__":

    def test1() -> None:
        # a^(-1)
        ainv = Matrix(3, 3, 0)
        for i in range(3):
            ainv[i, i] = 1
        print(f"a^(-1) is {ainv}")
        # u, v
        u = Matrix(3, 1, 0)
        u[0, 0], u[1, 0], u[2, 0] = 1, 2, -3
        v = Matrix(3, 1, 0)
        v[0, 0], v[1, 0], v[2, 0] = 4, -2, 5
        print(f"u is {u}")
        print(f"v is {v}")
        print(f"uv^T is {u * v.transpose()}")
        # Sherman Morrison
        print(f"(a + uv^T)^(-1) is {ainv.sherman_morrison(u, v)}")

    def test2() -> None:
        import doctest

        doctest.testmod()

    test1()
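

def _sherman_morrison_numpy_check() -> None:
    # Optional cross-check sketch (not from the original file) against the
    # closed form (A + u v^T)^-1 = A^-1 - (A^-1 u v^T A^-1) / (1 + v^T A^-1 u),
    # using numpy (an extra dependency assumed only for this sketch). Here
    # A = I, so A^-1 = I, matching the test1() setup above.
    import numpy as np

    a_inv = np.eye(3)
    u = np.array([[1.0], [2.0], [-3.0]])
    v = np.array([[4.0], [-2.0], [5.0]])
    denominator = 1.0 + (v.T @ a_inv @ u)[0, 0]
    sm = a_inv - (a_inv @ u @ v.T @ a_inv) / denominator
    print(np.allclose(sm, np.linalg.inv(np.eye(3) + u @ v.T)))  # True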
| 345
|
from __future__ import annotations
def slowsort(sequence: list, start: int | None = None, end: int | None = None) -> None:
    """
    Sorts sequence[start..end] (both inclusive) in place via the deliberately
    inefficient slowsort recursion.

    >>> seq = [5, 2, 9, 1]
    >>> slowsort(seq)
    >>> seq
    [1, 2, 5, 9]
    """
    if start is None:
        start = 0

    if end is None:
        end = len(sequence) - 1

    if start >= end:
        return

    mid = (start + end) // 2
    slowsort(sequence, start, mid)
    slowsort(sequence, mid + 1, end)

    if sequence[end] < sequence[mid]:
        sequence[end], sequence[mid] = sequence[mid], sequence[end]

    slowsort(sequence, start, end - 1)
if __name__ == "__main__":
from doctest import testmod
testmod()
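
    # Usage sketch: slowsort sorts in place.
    data = [5, 2, 9, 1, 7]
    slowsort(data)
    print(data)  # [1, 2, 5, 7, 9]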
| 257
| 0
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"configuration_bigbird_pegasus": [
"BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP",
"BigBirdPegasusConfig",
"BigBirdPegasusOnnxConfig",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_bigbird_pegasus"] = [
"BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST",
"BigBirdPegasusForCausalLM",
"BigBirdPegasusForConditionalGeneration",
"BigBirdPegasusForQuestionAnswering",
"BigBirdPegasusForSequenceClassification",
"BigBirdPegasusModel",
"BigBirdPegasusPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_bigbird_pegasus import (
BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP,
BigBirdPegasusConfig,
BigBirdPegasusOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bigbird_pegasus import (
BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST,
BigBirdPegasusForCausalLM,
BigBirdPegasusForConditionalGeneration,
BigBirdPegasusForQuestionAnswering,
BigBirdPegasusForSequenceClassification,
BigBirdPegasusModel,
BigBirdPegasusPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
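
# Usage sketch: the lazy module defers the heavy torch import until first
# attribute access, so the following resolves only when actually used:
#   from transformers import BigBirdPegasusForConditionalGeneration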
| 719
|
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class ViltProcessor(ProcessorMixin):
    """Wraps a ViLT image processor and a BERT tokenizer into a single processor."""

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "ViltImageProcessor"
    tokenizer_class = ("BertTokenizer", "BertTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor

    def __call__(
        self,
        images,
        text=None,
        add_special_tokens=True,
        padding=False,
        truncation=None,
        max_length=None,
        stride=0,
        pad_to_multiple_of=None,
        return_token_type_ids=None,
        return_attention_mask=None,
        return_overflowing_tokens=False,
        return_special_tokens_mask=False,
        return_offsets_mapping=False,
        return_length=False,
        verbose=True,
        return_tensors=None,
        **kwargs,
    ) -> BatchEncoding:
        encoding = self.tokenizer(
            text=text,
            add_special_tokens=add_special_tokens,
            padding=padding,
            truncation=truncation,
            max_length=max_length,
            stride=stride,
            pad_to_multiple_of=pad_to_multiple_of,
            return_token_type_ids=return_token_type_ids,
            return_attention_mask=return_attention_mask,
            return_overflowing_tokens=return_overflowing_tokens,
            return_special_tokens_mask=return_special_tokens_mask,
            return_offsets_mapping=return_offsets_mapping,
            return_length=return_length,
            verbose=verbose,
            return_tensors=return_tensors,
            **kwargs,
        )
        # add pixel_values + pixel_mask
        encoding_image_processor = self.image_processor(images, return_tensors=return_tensors)
        encoding.update(encoding_image_processor)

        return encoding

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
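

def _example_vqa_inputs():
    # Hedged usage sketch (not from the original module), assuming the
    # dandelin/vilt-b32-finetuned-vqa checkpoint is reachable; shows the merged
    # text + image encoding this processor returns.
    import requests
    from PIL import Image

    processor = ViltProcessor.from_pretrained("dandelin/vilt-b32-finetuned-vqa")
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url, stream=True).raw)
    encoding = processor(image, "How many cats are there?", return_tensors="pt")
    print(list(encoding.keys()))  # input_ids, ..., pixel_values, pixel_mask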
| 202
| 0
|
'''simple docstring'''
import argparse
import shutil
from pathlib import Path
from tqdm import tqdm
from transformers import AutoTokenizer
def pack_examples(tok, src_examples, tgt_examples, max_tokens=1024):
    finished_src, finished_tgt = [], []

    sorted_examples = list(zip(src_examples, tgt_examples))
    new_src, new_tgt = sorted_examples[0]

    def is_too_big(strang):
        return tok(strang, return_tensors="pt").input_ids.shape[1] > max_tokens

    for src, tgt in tqdm(sorted_examples[1:]):
        cand_src = new_src + " " + src
        cand_tgt = new_tgt + " " + tgt
        if is_too_big(cand_src) or is_too_big(cand_tgt):  # cant fit, finalize example
            finished_src.append(new_src)
            finished_tgt.append(new_tgt)
            new_src, new_tgt = src, tgt
        else:  # can fit, keep adding
            new_src, new_tgt = cand_src, cand_tgt

    # cleanup
    if new_src:
        assert new_tgt
        finished_src.append(new_src)
        finished_tgt.append(new_tgt)
    return finished_src, finished_tgt
def pack_data_dir(tok, data_dir, max_tokens, save_path):
    save_path = Path(save_path)
    save_path.mkdir(exist_ok=True)
    for split in ["train"]:
        src_path, tgt_path = data_dir / f"{split}.source", data_dir / f"{split}.target"
        src_docs = [x.rstrip() for x in Path(src_path).open().readlines()]
        tgt_docs = [x.rstrip() for x in Path(tgt_path).open().readlines()]
        packed_src, packed_tgt = pack_examples(tok, src_docs, tgt_docs, max_tokens)
        print(f"packed {split} split from {len(src_docs)} examples -> {len(packed_src)}.")
        Path(save_path / f"{split}.source").open("w").write("\n".join(packed_src))
        Path(save_path / f"{split}.target").open("w").write("\n".join(packed_tgt))
    for split in ["val", "test"]:
        src_path, tgt_path = data_dir / f"{split}.source", data_dir / f"{split}.target"
        shutil.copyfile(src_path, save_path / f"{split}.source")
        shutil.copyfile(tgt_path, save_path / f"{split}.target")
def packer_cli():
    parser = argparse.ArgumentParser()
    parser.add_argument("--tok_name", type=str, help="like facebook/bart-large-cnn,t5-base, etc.")
    parser.add_argument("--max_seq_len", type=int, default=128)
    parser.add_argument("--data_dir", type=str)
    parser.add_argument("--save_path", type=str)
    args = parser.parse_args()
    tokenizer = AutoTokenizer.from_pretrained(args.tok_name)
    return pack_data_dir(tokenizer, Path(args.data_dir), args.max_seq_len, args.save_path)
if __name__ == "__main__":
packer_cli()
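
    # Example invocation sketch (paths and tokenizer name illustrative):
    #   python pack_dataset.py --tok_name facebook/bart-large-cnn \
    #       --max_seq_len 512 --data_dir ./cnn_dm --save_path ./cnn_dm_packed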
| 42
|
'''simple docstring'''
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32


def get_dataloaders(accelerator, batch_size=16, model_name="bert-base-cased"):
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"], load_from_cache_file=False
    )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")
        return tokenizer.pad(examples, padding="longest", return_tensors="pt")

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )

    return train_dataloader, eval_dataloader
def evaluation_loop(accelerator, model, eval_dataloader, metric):
    model.eval()
    samples_seen = 0
    for step, batch in enumerate(eval_dataloader):
        # We could avoid this line since we set the accelerator with `device_placement=True`.
        batch.to(accelerator.device)
        with torch.no_grad():
            outputs = model(**batch)
        predictions = outputs.logits.argmax(dim=-1)
        # It is slightly faster to call this once, than multiple times
        predictions, references = accelerator.gather(
            (predictions, batch["labels"])
        )  # If we are in a multiprocess environment, the last batch has duplicates
        if accelerator.use_distributed:
            if step == len(eval_dataloader) - 1:
                predictions = predictions[: len(eval_dataloader.dataset) - samples_seen]
                references = references[: len(eval_dataloader.dataset) - samples_seen]
            else:
                samples_seen += references.shape[0]
        metric.add_batch(predictions=predictions, references=references)
    eval_metric = metric.compute()
    return eval_metric["accuracy"]
def training_function(config, args):
    # Initialize accelerator
    accelerator = Accelerator()
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    model_name = args.model_name_or_path

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size, model_name)

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained(model_name, return_dict=True)

    # Instantiate optimizer
    optimizer_cls = (
        AdamW
        if accelerator.state.deepspeed_plugin is None
        or "optimizer" not in accelerator.state.deepspeed_plugin.deepspeed_config
        else DummyOptim
    )
    optimizer = optimizer_cls(params=model.parameters(), lr=lr)

    if accelerator.state.deepspeed_plugin is not None:
        gradient_accumulation_steps = accelerator.state.deepspeed_plugin.deepspeed_config[
            "gradient_accumulation_steps"
        ]
    else:
        gradient_accumulation_steps = 1
    max_training_steps = (len(train_dataloader) * num_epochs) // gradient_accumulation_steps

    # Instantiate scheduler
    if (
        accelerator.state.deepspeed_plugin is None
        or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
    ):
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer,
            num_warmup_steps=0,
            num_training_steps=max_training_steps,
        )
    else:
        lr_scheduler = DummyScheduler(optimizer, total_num_steps=max_training_steps, warmup_num_steps=0)

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # We need to keep track of how many total steps we have iterated over
    overall_step = 0
    # We also need to keep track of the stating epoch so files are named properly
    starting_epoch = 0
    metric = evaluate.load("glue", "mrpc")
    ending_epoch = num_epochs

    if args.partial_train_epoch is not None:
        ending_epoch = args.partial_train_epoch

    if args.resume_from_checkpoint:
        accelerator.load_state(args.resume_from_checkpoint)
        epoch_string = args.resume_from_checkpoint.split("epoch_")[1]
        state_epoch_num = ""
        for char in epoch_string:
            if char.isdigit():
                state_epoch_num += char
            else:
                break
        starting_epoch = int(state_epoch_num) + 1
        accuracy = evaluation_loop(accelerator, model, eval_dataloader, metric)
        accelerator.print("resumed checkpoint performance:", accuracy)
        accelerator.print("resumed checkpoint's scheduler's lr:", lr_scheduler.get_lr()[0])
        accelerator.print("resumed optimizers's lr:", optimizer.param_groups[0]["lr"])
        with open(os.path.join(args.output_dir, f"state_{starting_epoch - 1}.json"), "r") as f:
            resumed_state = json.load(f)
            assert resumed_state["accuracy"] == accuracy, "Accuracy mismatch, loading from checkpoint failed"
            assert (
                resumed_state["lr"] == lr_scheduler.get_lr()[0]
            ), "Scheduler learning rate mismatch, loading from checkpoint failed"
            assert (
                resumed_state["optimizer_lr"] == optimizer.param_groups[0]["lr"]
            ), "Optimizer learning rate mismatch, loading from checkpoint failed"
            assert resumed_state["epoch"] == starting_epoch - 1, "Epoch mismatch, loading from checkpoint failed"
            return

    # Now we train the model
    state = {}
    for epoch in range(starting_epoch, ending_epoch):
        model.train()
        for step, batch in enumerate(train_dataloader):
            outputs = model(**batch)
            loss = outputs.loss
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss)
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()
            overall_step += 1

        output_dir = os.path.join(args.output_dir, f"epoch_{epoch}")
        accelerator.save_state(output_dir)
        accuracy = evaluation_loop(accelerator, model, eval_dataloader, metric)
        state["accuracy"] = accuracy
        state["lr"] = lr_scheduler.get_lr()[0]
        state["optimizer_lr"] = optimizer.param_groups[0]["lr"]
        state["epoch"] = epoch
        state["overall_step"] = overall_step
        accelerator.print(f"epoch {epoch}:", state)

        accelerator.wait_for_everyone()
        if accelerator.is_main_process:
            with open(os.path.join(args.output_dir, f"state_{epoch}.json"), "w") as f:
                json.dump(state, f)
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script tracking peak GPU memory usage.")
    parser.add_argument(
        "--model_name_or_path",
        type=str,
        default="bert-base-cased",
        help="Path to pretrained model or model identifier from huggingface.co/models.",
        required=False,
    )
    parser.add_argument(
        "--output_dir",
        type=str,
        default=".",
        help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory.",
    )
    parser.add_argument(
        "--resume_from_checkpoint",
        type=str,
        default=None,
        help="If the training should continue from a checkpoint folder.",
    )
    parser.add_argument(
        "--partial_train_epoch",
        type=int,
        default=None,
        help="If passed, the training will stop after this number of epochs.",
    )
    parser.add_argument(
        "--num_epochs",
        type=int,
        default=2,
        help="Number of train epochs.",
    )
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": args.num_epochs, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
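
    # Launch sketch (command line illustrative): run under the accelerate
    # launcher so distributed state (and, optionally, a DeepSpeed config)
    # is picked up, e.g.:
    #   accelerate launch this_script.py --output_dir ./ckpts --num_epochs 2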
| 42
| 1
|
def apply_table(inp, table):
    res = ""
    for i in table:
        res += inp[i - 1]
    return res


def left_shift(data):
    return data[1:] + data[0]


def xor(a, b):
    res = ""
    for i in range(len(a)):
        if a[i] == b[i]:
            res += "0"
        else:
            res += "1"
    return res


def apply_sbox(s, data):
    row = int("0b" + data[0] + data[-1], 2)
    col = int("0b" + data[1:3], 2)
    return bin(s[row][col])[2:]


def function(expansion, s0, s1, key, message):
    left = message[:4]
    right = message[4:]
    temp = apply_table(right, expansion)
    temp = xor(temp, key)
    l = apply_sbox(s0, temp[:4])  # noqa: E741
    r = apply_sbox(s1, temp[4:])
    l = "0" * (2 - len(l)) + l  # noqa: E741
    r = "0" * (2 - len(r)) + r
    temp = apply_table(l + r, p4_table)
    temp = xor(left, temp)
    return temp + right


if __name__ == "__main__":
    key = input("Enter 10 bit key: ")
    message = input("Enter 8 bit message: ")

    p8_table = [6, 3, 7, 4, 8, 5, 10, 9]
    p10_table = [3, 5, 2, 7, 4, 10, 1, 9, 8, 6]
    p4_table = [2, 4, 3, 1]
    IP = [2, 6, 3, 1, 4, 8, 5, 7]
    IP_inv = [4, 1, 3, 5, 7, 2, 8, 6]
    expansion = [4, 1, 2, 3, 2, 3, 4, 1]
    s0 = [[1, 0, 3, 2], [3, 2, 1, 0], [0, 2, 1, 3], [3, 1, 3, 2]]
    s1 = [[0, 1, 2, 3], [2, 0, 1, 3], [3, 0, 1, 0], [2, 1, 0, 3]]

    # key generation
    temp = apply_table(key, p10_table)
    left = temp[:5]
    right = temp[5:]
    left = left_shift(left)
    right = left_shift(right)
    key1 = apply_table(left + right, p8_table)
    left = left_shift(left)
    right = left_shift(right)
    left = left_shift(left)
    right = left_shift(right)
    key2 = apply_table(left + right, p8_table)

    # encryption
    temp = apply_table(message, IP)
    temp = function(expansion, s0, s1, key1, temp)
    temp = temp[4:] + temp[:4]
    temp = function(expansion, s0, s1, key2, temp)
    CT = apply_table(temp, IP_inv)
    print("Cipher text is:", CT)

    # decryption
    temp = apply_table(CT, IP)
    temp = function(expansion, s0, s1, key2, temp)
    temp = temp[4:] + temp[:4]
    temp = function(expansion, s0, s1, key1, temp)
    PT = apply_table(temp, IP_inv)
    print("Plain text after decypting is:", PT)
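
    # Optional round-trip helpers (a sketch layered on the globals built
    # above, not part of the original script; key1/key2 and the tables must
    # already be defined at this point):
    def encrypt(plaintext: str) -> str:
        temp = apply_table(plaintext, IP)
        temp = function(expansion, s0, s1, key1, temp)
        temp = temp[4:] + temp[:4]
        temp = function(expansion, s0, s1, key2, temp)
        return apply_table(temp, IP_inv)

    def decrypt(ciphertext: str) -> str:
        temp = apply_table(ciphertext, IP)
        temp = function(expansion, s0, s1, key2, temp)
        temp = temp[4:] + temp[:4]
        temp = function(expansion, s0, s1, key1, temp)
        return apply_table(temp, IP_inv)

    # S-DES is a permutation of the 8-bit block, so decryption inverts encryption.
    assert decrypt(encrypt(message)) == message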
| 710
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_speech_available, is_torch_available
_import_structure = {
'configuration_audio_spectrogram_transformer': [
'AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'ASTConfig',
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_audio_spectrogram_transformer"] = [
'AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'ASTForAudioClassification',
'ASTModel',
'ASTPreTrainedModel',
]
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_audio_spectrogram_transformer"] = ['ASTFeatureExtractor']
if TYPE_CHECKING:
from .configuration_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
ASTConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
ASTForAudioClassification,
ASTModel,
ASTPreTrainedModel,
)
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_audio_spectrogram_transformer import ASTFeatureExtractor
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 46
| 0
|
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
BertTokenizer,
ViltConfig,
ViltForImageAndTextRetrieval,
ViltForImagesAndTextClassification,
ViltForMaskedLM,
ViltForQuestionAnswering,
ViltImageProcessor,
ViltProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def create_rename_keys(config, vqa_model=False, nlvr_model=False, irtr_model=False):
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F'''transformer.blocks.{i}.norm1.weight''', F'''vilt.encoder.layer.{i}.layernorm_before.weight''') )
rename_keys.append((F'''transformer.blocks.{i}.norm1.bias''', F'''vilt.encoder.layer.{i}.layernorm_before.bias''') )
rename_keys.append(
(F'''transformer.blocks.{i}.attn.proj.weight''', F'''vilt.encoder.layer.{i}.attention.output.dense.weight''') )
rename_keys.append(
(F'''transformer.blocks.{i}.attn.proj.bias''', F'''vilt.encoder.layer.{i}.attention.output.dense.bias''') )
rename_keys.append((F'''transformer.blocks.{i}.norm2.weight''', F'''vilt.encoder.layer.{i}.layernorm_after.weight''') )
rename_keys.append((F'''transformer.blocks.{i}.norm2.bias''', F'''vilt.encoder.layer.{i}.layernorm_after.bias''') )
rename_keys.append(
(F'''transformer.blocks.{i}.mlp.fc1.weight''', F'''vilt.encoder.layer.{i}.intermediate.dense.weight''') )
rename_keys.append((F'''transformer.blocks.{i}.mlp.fc1.bias''', F'''vilt.encoder.layer.{i}.intermediate.dense.bias''') )
rename_keys.append((F'''transformer.blocks.{i}.mlp.fc2.weight''', F'''vilt.encoder.layer.{i}.output.dense.weight''') )
rename_keys.append((F'''transformer.blocks.{i}.mlp.fc2.bias''', F'''vilt.encoder.layer.{i}.output.dense.bias''') )
# embeddings
rename_keys.extend(
[
# text embeddings
('text_embeddings.word_embeddings.weight', 'vilt.embeddings.text_embeddings.word_embeddings.weight'),
(
'text_embeddings.position_embeddings.weight',
'vilt.embeddings.text_embeddings.position_embeddings.weight',
),
('text_embeddings.position_ids', 'vilt.embeddings.text_embeddings.position_ids'),
(
'text_embeddings.token_type_embeddings.weight',
'vilt.embeddings.text_embeddings.token_type_embeddings.weight',
),
('text_embeddings.LayerNorm.weight', 'vilt.embeddings.text_embeddings.LayerNorm.weight'),
('text_embeddings.LayerNorm.bias', 'vilt.embeddings.text_embeddings.LayerNorm.bias'),
# patch embeddings
('transformer.cls_token', 'vilt.embeddings.cls_token'),
('transformer.patch_embed.proj.weight', 'vilt.embeddings.patch_embeddings.projection.weight'),
('transformer.patch_embed.proj.bias', 'vilt.embeddings.patch_embeddings.projection.bias'),
('transformer.pos_embed', 'vilt.embeddings.position_embeddings'),
# token type embeddings
('token_type_embeddings.weight', 'vilt.embeddings.token_type_embeddings.weight'),
] )
# final layernorm + pooler
rename_keys.extend(
[
('transformer.norm.weight', 'vilt.layernorm.weight'),
('transformer.norm.bias', 'vilt.layernorm.bias'),
('pooler.dense.weight', 'vilt.pooler.dense.weight'),
('pooler.dense.bias', 'vilt.pooler.dense.bias'),
] )
# classifier head(s)
if vqa_model:
# classification head
rename_keys.extend(
[
('vqa_classifier.0.weight', 'classifier.0.weight'),
('vqa_classifier.0.bias', 'classifier.0.bias'),
('vqa_classifier.1.weight', 'classifier.1.weight'),
('vqa_classifier.1.bias', 'classifier.1.bias'),
('vqa_classifier.3.weight', 'classifier.3.weight'),
('vqa_classifier.3.bias', 'classifier.3.bias'),
] )
elif nlvr_model:
# classification head
rename_keys.extend(
[
('nlvr2_classifier.0.weight', 'classifier.0.weight'),
('nlvr2_classifier.0.bias', 'classifier.0.bias'),
('nlvr2_classifier.1.weight', 'classifier.1.weight'),
('nlvr2_classifier.1.bias', 'classifier.1.bias'),
('nlvr2_classifier.3.weight', 'classifier.3.weight'),
('nlvr2_classifier.3.bias', 'classifier.3.bias'),
] )
else:
pass
return rename_keys
def read_in_q_k_v(state_dict, config):
    for i in range(config.num_hidden_layers):
        prefix = "vilt."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"transformer.blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"transformer.blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
def remove_classification_head_(state_dict):
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
@torch.no_grad()
def _snake_case ( _snake_case : List[str] , _snake_case : Dict ) -> List[str]:
'''simple docstring'''
_A = ViltConfig(image_size=3_84 , patch_size=32 , tie_word_embeddings=_snake_case )
_A = False
_A = False
_A = False
_A = False
if "vqa" in checkpoint_url:
_A = True
_A = 31_29
_A = 'huggingface/label-files'
_A = 'vqa2-id2label.json'
_A = json.load(open(hf_hub_download(_snake_case , _snake_case , repo_type='dataset' ) , 'r' ) )
_A = {int(_snake_case ): v for k, v in idalabel.items()}
_A = idalabel
_A = {v: k for k, v in idalabel.items()}
_A = ViltForQuestionAnswering(_snake_case )
elif "nlvr" in checkpoint_url:
_A = True
_A = 2
_A = {0: 'False', 1: 'True'}
_A = {v: k for k, v in config.idalabel.items()}
_A = 3
_A = ViltForImagesAndTextClassification(_snake_case )
elif "irtr" in checkpoint_url:
_A = True
_A = ViltForImageAndTextRetrieval(_snake_case )
elif "mlm_itm" in checkpoint_url:
_A = True
_A = ViltForMaskedLM(_snake_case )
else:
raise ValueError('Unknown model type' )
# load state_dict of original model, remove and rename some keys
_A = torch.hub.load_state_dict_from_url(_snake_case , map_location='cpu' )['state_dict']
_A = create_rename_keys(_snake_case , _snake_case , _snake_case , _snake_case )
for src, dest in rename_keys:
rename_key(_snake_case , _snake_case , _snake_case )
read_in_q_k_v(_snake_case , _snake_case )
if mlm_model or irtr_model:
_A = ['itm_score.fc.weight', 'itm_score.fc.bias']
for k in ignore_keys:
state_dict.pop(_snake_case , _snake_case )
# load state dict into HuggingFace model
model.eval()
if mlm_model:
_A , _A = model.load_state_dict(_snake_case , strict=_snake_case )
assert missing_keys == ["mlm_score.decoder.bias"]
else:
model.load_state_dict(_snake_case )
# Define processor
_A = ViltImageProcessor(size=3_84 )
_A = BertTokenizer.from_pretrained('bert-base-uncased' )
_A = ViltProcessor(_snake_case , _snake_case )
    # Forward pass on example inputs (image + text)
    if nlvr_model:
        image1 = Image.open(requests.get('https://lil.nlp.cornell.edu/nlvr/exs/ex0_0.jpg', stream=True).raw)
        image2 = Image.open(requests.get('https://lil.nlp.cornell.edu/nlvr/exs/ex0_0.jpg', stream=True).raw)
        text = (
            'The left image contains twice the number of dogs as the right image, and at least two dogs in total are'
            ' standing.'
        )
        encoding_1 = processor(image1, text, return_tensors='pt')
        encoding_2 = processor(image2, text, return_tensors='pt')
        outputs = model(
            input_ids=encoding_1.input_ids,
            pixel_values=encoding_1.pixel_values,
            pixel_values_2=encoding_2.pixel_values,
        )
    else:
        image = Image.open(requests.get('http://images.cocodataset.org/val2017/000000039769.jpg', stream=True).raw)
        if mlm_model:
            text = 'a bunch of [MASK] laying on a [MASK].'
        else:
            text = 'How many cats are there?'
        encoding = processor(image, text, return_tensors='pt')
        outputs = model(**encoding)
    # Verify outputs
    if mlm_model:
        expected_shape = torch.Size([1, 11, 30522])
        expected_slice = torch.tensor([-12.5061, -12.5123, -12.5174])
        assert outputs.logits.shape == expected_shape
        assert torch.allclose(outputs.logits[0, 0, :3], expected_slice, atol=1e-4)

        # verify masked token prediction equals "cats"
        predicted_id = outputs.logits[0, 4, :].argmax(-1).item()
        assert tokenizer.decode([predicted_id]) == "cats"
    elif vqa_model:
        expected_shape = torch.Size([1, 3129])
        expected_slice = torch.tensor([-15.9495, -18.1472, -10.3041])
        assert outputs.logits.shape == expected_shape
        assert torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4)

        # verify vqa prediction equals "2"
        predicted_idx = outputs.logits.argmax(-1).item()
        assert model.config.id2label[predicted_idx] == "2"
    elif nlvr_model:
        expected_shape = torch.Size([1, 2])
        expected_slice = torch.tensor([-2.8721, 2.1291])
        assert outputs.logits.shape == expected_shape
        assert torch.allclose(outputs.logits[0, :2], expected_slice, atol=1e-4)
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(F'''Saving model and processor to {pytorch_dump_folder_path}''')
    model.save_pretrained(pytorch_dump_folder_path)
    processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--checkpoint_url",
        default="https://github.com/dandelin/ViLT/releases/download/200k/vilt_200k_mlm_itm.ckpt",
        type=str,
        help="URL of the checkpoint you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )
    args = parser.parse_args()
    convert_vilt_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
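# Example invocation (a sketch; the script file name and the output folder
# below are hypothetical):
#   python convert_vilt_checkpoint.py \
#       --checkpoint_url https://github.com/dandelin/ViLT/releases/download/200k/vilt_200k_mlm_itm.ckpt \
#       --pytorch_dump_folder_path ./vilt-b32-mlm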
"""simple docstring"""
class FlowNetwork:
    '''simple docstring'''
    def __init__(self, graph, sources, sinks):
        self.source_index = None
        self.sink_index = None
        self.graph = graph

        self._normalize_graph(sources, sinks)
        self.vertices_count = len(graph)
        self.maximum_flow_algorithm = None
    # make only one source and one sink
    def _normalize_graph(self, sources, sinks):
        if isinstance(sources, int):
            sources = [sources]
        if isinstance(sinks, int):
            sinks = [sinks]

        if len(sources) == 0 or len(sinks) == 0:
            return

        self.source_index = sources[0]
        self.sink_index = sinks[0]

        # make fake vertex if there are more
        # than one source or sink
        if len(sources) > 1 or len(sinks) > 1:
            max_input_flow = 0
            for i in sources:
                max_input_flow += sum(self.graph[i])

            size = len(self.graph) + 1
            for room in self.graph:
                room.insert(0, 0)
            self.graph.insert(0, [0] * size)
            for i in sources:
                self.graph[0][i + 1] = max_input_flow
            self.source_index = 0

            size = len(self.graph) + 1
            for room in self.graph:
                room.append(0)
            self.graph.append([0] * size)
            for i in sinks:
                self.graph[i + 1][size - 1] = max_input_flow
            self.sink_index = size - 1
    def find_maximum_flow(self):
        if self.maximum_flow_algorithm is None:
            raise Exception('You need to set maximum flow algorithm before.')
        if self.source_index is None or self.sink_index is None:
            return 0

        self.maximum_flow_algorithm.execute()
        return self.maximum_flow_algorithm.getMaximumFlow()
    def set_maximum_flow_algorithm(self, algorithm):
        self.maximum_flow_algorithm = algorithm(self)
class FlowNetworkAlgorithmExecutor:
    '''simple docstring'''
    def __init__(self, flow_network):
        self.flow_network = flow_network
        self.verticies_count = flow_network.vertices_count
        self.source_index = flow_network.source_index
        self.sink_index = flow_network.sink_index
        # it's just a reference, so you shouldn't change
        # it in your algorithms, use deep copy before doing that
        self.graph = flow_network.graph
        self.executed = False
    def execute(self):
        if not self.executed:
            self._algorithm()
            self.executed = True

    # You should override this method in a subclass
    def _algorithm(self):
        pass
class MaximumFlowAlgorithmExecutor(FlowNetworkAlgorithmExecutor):
    '''simple docstring'''
    def __init__(self, flow_network):
        super().__init__(flow_network)
        # use this to save your result
        self.maximum_flow = -1

    def getMaximumFlow(self):
        if not self.executed:
            raise Exception('You should execute algorithm before using its result!')
        return self.maximum_flow
class PushRelabelExecutor(MaximumFlowAlgorithmExecutor):
    '''simple docstring'''
    def __init__(self, flow_network):
        super().__init__(flow_network)
        self.preflow = [[0] * self.verticies_count for i in range(self.verticies_count)]
        self.heights = [0] * self.verticies_count
        self.excesses = [0] * self.verticies_count
    def _algorithm(self):
        self.heights[self.source_index] = self.verticies_count
        # push some substance to graph
        for nextvertex_index, bandwidth in enumerate(self.graph[self.source_index]):
            self.preflow[self.source_index][nextvertex_index] += bandwidth
            self.preflow[nextvertex_index][self.source_index] -= bandwidth
            self.excesses[nextvertex_index] += bandwidth

        # Relabel-to-front selection rule
        vertices_list = [
            i
            for i in range(self.verticies_count)
            if i != self.source_index and i != self.sink_index
        ]

        # move through list
        i = 0
        while i < len(vertices_list):
            vertex_index = vertices_list[i]
            previous_height = self.heights[vertex_index]
            self.process_vertex(vertex_index)
            if self.heights[vertex_index] > previous_height:
                # if it was relabeled, swap elements
                # and start from 0 index
                vertices_list.insert(0, vertices_list.pop(i))
                i = 0
            else:
                i += 1

        self.maximum_flow = sum(self.preflow[self.source_index])
    def process_vertex(self, vertex_index):
        while self.excesses[vertex_index] > 0:
            for neighbour_index in range(self.verticies_count):
                # if it's neighbour and current vertex is higher
                if (
                    self.graph[vertex_index][neighbour_index]
                    - self.preflow[vertex_index][neighbour_index]
                    > 0
                    and self.heights[vertex_index] > self.heights[neighbour_index]
                ):
                    self.push(vertex_index, neighbour_index)

            self.relabel(vertex_index)
    def push(self, from_index, to_index):
        preflow_delta = min(
            self.excesses[from_index], self.graph[from_index][to_index] - self.preflow[from_index][to_index], )
        self.preflow[from_index][to_index] += preflow_delta
        self.preflow[to_index][from_index] -= preflow_delta
        self.excesses[from_index] -= preflow_delta
        self.excesses[to_index] += preflow_delta
    def relabel(self, vertex_index):
        min_height = None
        for to_index in range(self.verticies_count):
            if (
                self.graph[vertex_index][to_index]
                - self.preflow[vertex_index][to_index]
                > 0
            ) and (min_height is None or self.heights[to_index] < min_height):
                min_height = self.heights[to_index]

        if min_height is not None:
            self.heights[vertex_index] = min_height + 1
if __name__ == "__main__":
    entrances = [0]
    exits = [3]
    # graph = [
    #     [0, 0, 4, 6, 0, 0],
    #     [0, 0, 5, 2, 0, 0],
    #     [0, 0, 0, 0, 4, 4],
    #     [0, 0, 0, 0, 6, 6],
    #     [0, 0, 0, 0, 0, 0],
    #     [0, 0, 0, 0, 0, 0],
    # ]
    graph = [[0, 7, 0, 0], [0, 0, 6, 0], [0, 0, 0, 8], [9, 0, 0, 0]]

    # prepare our network
    flow_network = FlowNetwork(graph, entrances, exits)
    # set algorithm
    flow_network.set_maximum_flow_algorithm(PushRelabelExecutor)
    # and calculate
    maximum_flow = flow_network.find_maximum_flow()

    print(F'''maximum flow is {maximum_flow}''')
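# Cross-check sketch (an addition, not part of the original module): a minimal
# BFS-based Edmonds-Karp implementation for validating the push-relabel result
# on small capacity matrices. For the sample graph above (source 0, sink 3) the
# only augmenting path is 0 -> 1 -> 2 -> 3, so the expected maximum flow is
# min(7, 6, 8) = 6.
from collections import deque


def edmonds_karp(capacity, source, sink):
    n = len(capacity)
    # residual capacities, copied so the input matrix is left untouched
    residual = [row[:] for row in capacity]
    max_flow = 0
    while True:
        # BFS for the shortest augmenting path in the residual graph
        parent = [-1] * n
        parent[source] = source
        queue = deque([source])
        while queue and parent[sink] == -1:
            u = queue.popleft()
            for v in range(n):
                if parent[v] == -1 and residual[u][v] > 0:
                    parent[v] = u
                    queue.append(v)
        if parent[sink] == -1:  # no augmenting path left
            return max_flow
        # find the bottleneck along the path, then push flow through it
        bottleneck = float("inf")
        v = sink
        while v != source:
            u = parent[v]
            bottleneck = min(bottleneck, residual[u][v])
            v = u
        v = sink
        while v != source:
            u = parent[v]
            residual[u][v] -= bottleneck
            residual[v][u] += bottleneck
            v = u
        max_flow += bottleneck


# expected: edmonds_karp([[0, 7, 0, 0], [0, 0, 6, 0], [0, 0, 0, 8], [9, 0, 0, 0]], 0, 3) == 6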
'''simple docstring'''
def binary_insertion_sort(collection: list) -> list:
    '''simple docstring'''
    n = len(collection)
    for i in range(1, n):
        val = collection[i]
        low = 0
        high = i - 1
        # binary search for the insertion index of `val` in collection[:i]
        while low <= high:
            mid = (low + high) // 2
            if val < collection[mid]:
                high = mid - 1
            else:
                low = mid + 1
        # shift elements right to make room at index `low`
        for j in range(i, low, -1):
            collection[j] = collection[j - 1]
        collection[low] = val
    return collection


if __name__ == "__main__":
    user_input = input('Enter numbers separated by a comma:\n').strip()
    unsorted = [int(item) for item in user_input.split(',')]
    print(binary_insertion_sort(unsorted))
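# Quick self-check sketch (an illustrative addition, not part of the original
# module): the result should always agree with Python's built-in sort.
def _demo_check_binary_insertion_sort():
    import random

    data = [random.randint(-100, 100) for _ in range(50)]
    assert binary_insertion_sort(data[:]) == sorted(data)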
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {'vocab_file': 'sentencepiece.bpe.model'}

PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {
        'camembert-base': 'https://huggingface.co/camembert-base/resolve/main/sentencepiece.bpe.model',
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    'camembert-base': 512,
}

SPIECE_UNDERLINE = '▁'
class CamembertTokenizer(PreTrainedTokenizer):
    '''simple docstring'''
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['input_ids', 'attention_mask']
    def __init__(self, vocab_file, bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", additional_special_tokens=["<s>NOTUSED", "</s>NOTUSED"], sp_model_kwargs=None, **kwargs):
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, cls_token=cls_token, pad_token=pad_token, mask_token=mask_token, additional_special_tokens=additional_special_tokens, sp_model_kwargs=self.sp_model_kwargs, **kwargs, )
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.vocab_file = vocab_file
        # HACK: These tokens were added by fairseq but don't seem to be actually used when duplicated in the actual
        # sentencepiece vocabulary (this is the case for <s> and </s>
        self.fairseq_tokens_to_ids = {"""<s>NOTUSED""": 0, """<pad>""": 1, """</s>NOTUSED""": 2, """<unk>""": 3}
        self.fairseq_offset = len(self.fairseq_tokens_to_ids)
        self.fairseq_tokens_to_ids["""<mask>"""] = len(self.sp_model) + len(self.fairseq_tokens_to_ids)
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    @property
    def vocab_size(self):
        return len(self.fairseq_tokens_to_ids) + len(self.sp_model)

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text):
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        elif self.sp_model.PieceToId(token) == 0:
            # Convert sentence piece unk token to fairseq unk token index
            return self.unk_token_id
        return self.fairseq_offset + self.sp_model.PieceToId(token)

    def _convert_id_to_token(self, index):
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset)

    def convert_tokens_to_string(self, tokens):
        current_sub_tokens = []
        out_string = """"""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def __getstate__(self):
        state = self.__dict__.copy()
        state["""sp_model"""] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, """sp_model_kwargs"""):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not os.path.isdir(save_directory):
            logger.error(F'''Vocabulary path ({save_directory}) should be a directory''')
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""])
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, """wb""") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (out_vocab_file,)
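# Usage sketch (kept as comments because it downloads the camembert-base
# checkpoint; the sample sentence is an arbitrary illustration):
#
#     tokenizer = CamembertTokenizer.from_pretrained("camembert-base")
#     encoding = tokenizer("J'aime le camembert !")
#     print(encoding["input_ids"])          # ids framed by <s> ... </s>
#     print(tokenizer.convert_ids_to_tokens(encoding["input_ids"]))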
from __future__ import annotations
import math
from collections.abc import Callable
def line_length(fnc: Callable[[int | float], int | float], x_start: int | float, x_end: int | float, steps: int = 100, ) -> float:
    x1 = x_start
    fx1 = fnc(x_start)
    length = 0.0
    for _ in range(steps):
        # Approximates curve as a sequence of linear lines and sums their length
        x2 = (x_end - x_start) / steps + x1
        fx2 = fnc(x2)
        length += math.hypot(x2 - x1, fx2 - fx1)

        # Increment step
        x1 = x2
        fx1 = fx2
    return length


if __name__ == "__main__":

    def f(x):
        return math.sin(10 * x)

    print("""f(x) = sin(10 * x)""")
    print("""The length of the curve from x = -10 to x = 10 is:""")
    i = 10
    while i <= 100_000:
        print(F'With {i} steps: {line_length(f, -10, 10, i)}')
        i *= 10
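# Sanity-check sketch (an illustrative addition): for f(x) = x the curve from
# 0 to 1 is a straight segment of length sqrt(2), so the approximation should
# match it almost exactly, independent of the step count.
def _demo_check_line_length():
    assert abs(line_length(lambda x: x, 0, 1, 1_000) - math.sqrt(2)) < 1e-6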
import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from transformers import (
VisualBertConfig,
VisualBertForMultipleChoice,
VisualBertForPreTraining,
VisualBertForQuestionAnswering,
VisualBertForVisualReasoning,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

rename_keys_prefix = [
    ("bert.bert", "visual_bert"),
    ("bert.cls", "cls"),
    ("bert.classifier", "cls"),
    ("token_type_embeddings_visual", "visual_token_type_embeddings"),
    ("position_embeddings_visual", "visual_position_embeddings"),
    ("projection", "visual_projection"),
]

ACCEPTABLE_CHECKPOINTS = [
    "nlvr2_coco_pre_trained.th",
    "nlvr2_fine_tuned.th",
    "nlvr2_pre_trained.th",
    "vcr_coco_pre_train.th",
    "vcr_fine_tune.th",
    "vcr_pre_train.th",
    "vqa_coco_pre_trained.th",
    "vqa_fine_tuned.th",
    "vqa_pre_trained.th",
]
def load_state_dict(checkpoint_path):
    sd = torch.load(checkpoint_path, map_location="cpu")
    return sd
def get_new_dict(d, config, rename_keys_prefix=rename_keys_prefix):
    new_d = OrderedDict()
    new_d["visual_bert.embeddings.position_ids"] = torch.arange(config.max_position_embeddings).expand((1, -1))
    # detector_d = OrderedDict()
    for key in d:
        if "detector" in key:
            # detector_d[key.replace('detector.','')] = d[key]
            continue
        new_key = key
        for name_pair in rename_keys_prefix:
            new_key = new_key.replace(name_pair[0], name_pair[1])
        new_d[new_key] = d[key]
        if key == "bert.cls.predictions.decoder.weight":
            # Old bert code didn't have `decoder.bias`, but was added separately
            new_d["cls.predictions.decoder.bias"] = new_d["cls.predictions.bias"]
    return new_d
@torch.no_grad()
def convert_visual_bert_checkpoint(checkpoint_path, pytorch_dump_folder_path):
assert (
checkpoint_path.split("/" )[-1] in ACCEPTABLE_CHECKPOINTS
), f"""The checkpoint provided must be in {ACCEPTABLE_CHECKPOINTS}."""
    # Get Config
    if "pre" in checkpoint_path:
        model_type = "pretraining"
        if "vcr" in checkpoint_path:
            config_params = {"visual_embedding_dim": 512}
        elif "vqa_advanced" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2048}
        elif "vqa" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2048}
        elif "nlvr" in checkpoint_path:
            config_params = {"visual_embedding_dim": 1024}
        else:
            raise NotImplementedError(f"""No implementation found for `{checkpoint_path}`.""")
    else:
        if "vcr" in checkpoint_path:
            config_params = {"visual_embedding_dim": 512}
            model_type = "multichoice"
        elif "vqa_advanced" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2048}
            model_type = "vqa_advanced"
        elif "vqa" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2048, "num_labels": 3129}
            model_type = "vqa"
        elif "nlvr" in checkpoint_path:
            config_params = {
                "visual_embedding_dim": 1024,
                "num_labels": 2,
            }
            model_type = "nlvr"

    config = VisualBertConfig(**config_params)

    # Load State Dict
    state_dict = load_state_dict(checkpoint_path)
    new_state_dict = get_new_dict(state_dict, config)

    if model_type == "pretraining":
        model = VisualBertForPreTraining(config)
    elif model_type == "vqa":
        model = VisualBertForQuestionAnswering(config)
    elif model_type == "nlvr":
        model = VisualBertForVisualReasoning(config)
    elif model_type == "multichoice":
        model = VisualBertForMultipleChoice(config)

    model.load_state_dict(new_state_dict)
    # Save Checkpoints
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("orig_checkpoint_path", type=str, help="A path to .th on local filesystem.")
    parser.add_argument("pytorch_dump_folder_path", type=str, help="Path to the output PyTorch model.")
    args = parser.parse_args()
    convert_visual_bert_checkpoint(args.orig_checkpoint_path, args.pytorch_dump_folder_path)
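# Example invocation (a sketch; the script file name is hypothetical, and the
# checkpoint file must be one of the ACCEPTABLE_CHECKPOINTS listed above):
#   python convert_visual_bert_checkpoint.py vqa_pre_trained.th ./visualbert-vqa-pre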
from statistics import mean
import numpy as np
def calculate_turn_around_time(process_name, arrival_time, burst_time, no_of_process):
    '''simple docstring'''
    current_time = 0
    # Number of processes finished
    finished_process_count = 0
    # Displays the finished process.
    # If it is 0, the performance is completed if it is 1, before the performance.
    finished_process = [0] * no_of_process
    # List to include calculation results
    turn_around_time = [0] * no_of_process

    # Sort by arrival time.
    burst_time = [burst_time[i] for i in np.argsort(arrival_time)]
    process_name = [process_name[i] for i in np.argsort(arrival_time)]
    arrival_time.sort()

    while no_of_process > finished_process_count:
        i = 0
        while finished_process[i] == 1:
            i += 1
        if current_time < arrival_time[i]:
            current_time = arrival_time[i]

        response_ratio = 0
        # Index showing the location of the process being performed
        loc = 0
        # Saves the current response ratio.
        temp = 0
        for i in range(0, no_of_process):
            if finished_process[i] == 0 and arrival_time[i] <= current_time:
                temp = (burst_time[i] + (current_time - arrival_time[i])) / burst_time[
                    i
                ]
            if response_ratio < temp:
                response_ratio = temp
                loc = i

        # Calculate the turn around time
        turn_around_time[loc] = current_time + burst_time[loc] - arrival_time[loc]
        current_time += burst_time[loc]
        # Indicates that the process has been performed.
        finished_process[loc] = 1
        # Increase finished_process_count by 1
        finished_process_count += 1

    return turn_around_time


def calculate_waiting_time(process_name, turn_around_time, burst_time, no_of_process):
    '''simple docstring'''
    waiting_time = [0] * no_of_process
    for i in range(0, no_of_process):
        waiting_time[i] = turn_around_time[i] - burst_time[i]
    return waiting_time
if __name__ == "__main__":
    no_of_process = 5
    process_name = ['A', 'B', 'C', 'D', 'E']
    arrival_time = [1, 2, 3, 4, 5]
    burst_time = [1, 2, 3, 4, 5]
    turn_around_time = calculate_turn_around_time(
        process_name, arrival_time, burst_time, no_of_process
    )
    waiting_time = calculate_waiting_time(
        process_name, turn_around_time, burst_time, no_of_process
    )
    print('Process name \tArrival time \tBurst time \tTurn around time \tWaiting time')
    for i in range(0, no_of_process):
        print(
            F"""{process_name[i]}\t\t{arrival_time[i]}\t\t{burst_time[i]}\t\t"""
            F"""{turn_around_time[i]}\t\t\t{waiting_time[i]}"""
        )
    print(F"""average waiting time : {mean(waiting_time):.5f}""")
    print(F"""average turn around time : {mean(turn_around_time):.5f}""")
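# Worked example sketch (an illustrative addition): HRRN always runs the ready
# process with the highest response ratio (waiting_time + burst_time) / burst_time.
# At time 10, a process with arrival_time 3 and burst_time 4 has ratio
# ((10 - 3) + 4) / 4 = 2.75, matching the `temp` computation above.
def _demo_response_ratio(current_time, arrival, burst):
    return (burst + (current_time - arrival)) / burst


assert _demo_response_ratio(10, 3, 4) == 2.75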
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
__a = logging.get_logger(__name__)
if is_vision_available():
import PIL
class CLIPImageProcessor(BaseImageProcessor):
    """simple docstring"""
    model_input_names = ['''pixel_values''']
    def __init__(self, do_resize=True, size=None, resample=PILImageResampling.BICUBIC, do_center_crop=True, crop_size=None, do_rescale=True, rescale_factor=1 / 255, do_normalize=True, image_mean=None, image_std=None, do_convert_rgb=True, **kwargs, ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {'''shortest_edge''': 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {'''height''': 224, '''width''': 224}
        crop_size = get_size_dict(crop_size, default_to_square=True, param_name='''crop_size''')
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
        self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD
        self.do_convert_rgb = do_convert_rgb
    def resize(self, image, size, resample=PILImageResampling.BICUBIC, data_format=None, **kwargs, ) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f'''The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}''')
        output_size = get_resize_output_image_size(image, size=size['''shortest_edge'''], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)
    def center_crop(self, image, size, data_format=None, **kwargs, ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f'''The `size` parameter must contain the keys (height, width). Got {size.keys()}''')
        return center_crop(image, size=(size['''height'''], size['''width''']), data_format=data_format, **kwargs)
    def rescale(self, image, scale, data_format=None, **kwargs, ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)
    def normalize(self, image, mean, std, data_format=None, **kwargs, ) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
    def preprocess(self, images, do_resize=None, size=None, resample=None, do_center_crop=None, crop_size=None, do_rescale=None, rescale_factor=None, do_normalize=None, image_mean=None, image_std=None, do_convert_rgb=None, return_tensors=None, data_format=ChannelDimension.FIRST, **kwargs, ) -> PIL.Image.Image:
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, param_name='''size''', default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name='''crop_size''', default_to_square=True)
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError(
                '''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
                '''torch.Tensor, tf.Tensor or jax.ndarray.''' )
        if do_resize and size is None:
            raise ValueError('''Size must be specified if do_resize is True.''' )
        if do_center_crop and crop_size is None:
            raise ValueError('''Crop size must be specified if do_center_crop is True.''' )
        if do_rescale and rescale_factor is None:
            raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError('''Image mean and std must be specified if do_normalize is True.''' )
        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            images = [convert_to_rgb(image) for image in images]
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {'''pixel_values''': images}
        return BatchFeature(data=data, tensor_type=return_tensors)
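# Usage sketch (comments only; the random dummy image is an arbitrary
# illustration, not part of the original module):
#
#     import numpy as np
#     processor = CLIPImageProcessor(size={"shortest_edge": 224})
#     image = (np.random.rand(256, 320, 3) * 255).astype("uint8")
#     batch = processor(images=image, return_tensors="np")
#     print(batch["pixel_values"].shape)   # -> (1, 3, 224, 224)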
import argparse
import os
import re
import tensorflow as tf
import torch
from transformers import BertConfig, BertModel
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def load_tf2_weights_in_bert(model, tf_checkpoint_path, config):
    '''simple docstring'''
    tf_path = os.path.abspath(tf_checkpoint_path)
    logger.info(f"Converting TensorFlow checkpoint from {tf_path}")
    # Load weights from TF model
    init_vars = tf.train.list_variables(tf_path)
    names = []
    arrays = []
    layer_depth = []
for full_name, shape in init_vars:
# logger.info(f"Loading TF weight {name} with shape {shape}")
__UpperCAmelCase : List[Any] = full_name.split('''/''' )
if full_name == "_CHECKPOINTABLE_OBJECT_GRAPH" or name[0] in ["global_step", "save_counter"]:
logger.info(f"Skipping non-model layer {full_name}" )
continue
if "optimizer" in full_name:
logger.info(f"Skipping optimization layer {full_name}" )
continue
if name[0] == "model":
# ignore initial 'model'
__UpperCAmelCase : List[str] = name[1:]
# figure out how many levels deep the name is
__UpperCAmelCase : str = 0
for _name in name:
if _name.startswith('''layer_with_weights''' ):
depth += 1
else:
break
layer_depth.append(a__ )
# read data
__UpperCAmelCase : str = tf.train.load_variable(a__ , a__ )
names.append('''/'''.join(a__ ) )
arrays.append(a__ )
logger.info(f"Read a total of {len(a__ ):,} layers" )
# Sanity check
if len(set(a__ ) ) != 1:
raise ValueError(f"Found layer names with different depths (layer depth {list(set(a__ ) )})" )
__UpperCAmelCase : Union[str, Any] = list(set(a__ ) )[0]
if layer_depth != 1:
raise ValueError(
'''The model contains more than just the embedding/encoder layers. This script does not handle MLM/NSP'''
''' heads.''' )
# convert layers
logger.info('''Converting weights...''' )
for full_name, array in zip(a__ , a__ ):
__UpperCAmelCase : str = full_name.split('''/''' )
__UpperCAmelCase : Optional[Any] = model
__UpperCAmelCase : Optional[Any] = []
for i, m_name in enumerate(a__ ):
if m_name == ".ATTRIBUTES":
# variable names end with .ATTRIBUTES/VARIABLE_VALUE
break
if m_name.startswith('''layer_with_weights''' ):
__UpperCAmelCase : Tuple = int(m_name.split('''-''' )[-1] )
if layer_num <= 2:
# embedding layers
# layer_num 0: word_embeddings
# layer_num 1: position_embeddings
# layer_num 2: token_type_embeddings
continue
elif layer_num == 3:
# embedding LayerNorm
trace.extend(['''embeddings''', '''LayerNorm'''] )
__UpperCAmelCase : Tuple = getattr(a__ , '''embeddings''' )
__UpperCAmelCase : List[str] = getattr(a__ , '''LayerNorm''' )
elif layer_num > 3 and layer_num < config.num_hidden_layers + 4:
# encoder layers
trace.extend(['''encoder''', '''layer''', str(layer_num - 4 )] )
__UpperCAmelCase : Optional[Any] = getattr(a__ , '''encoder''' )
__UpperCAmelCase : Optional[int] = getattr(a__ , '''layer''' )
__UpperCAmelCase : Dict = pointer[layer_num - 4]
elif layer_num == config.num_hidden_layers + 4:
# pooler layer
trace.extend(['''pooler''', '''dense'''] )
__UpperCAmelCase : str = getattr(a__ , '''pooler''' )
__UpperCAmelCase : Dict = getattr(a__ , '''dense''' )
elif m_name == "embeddings":
trace.append('''embeddings''' )
__UpperCAmelCase : Union[str, Any] = getattr(a__ , '''embeddings''' )
if layer_num == 0:
trace.append('''word_embeddings''' )
__UpperCAmelCase : Dict = getattr(a__ , '''word_embeddings''' )
elif layer_num == 1:
trace.append('''position_embeddings''' )
__UpperCAmelCase : List[Any] = getattr(a__ , '''position_embeddings''' )
elif layer_num == 2:
trace.append('''token_type_embeddings''' )
__UpperCAmelCase : Optional[Any] = getattr(a__ , '''token_type_embeddings''' )
else:
raise ValueError(f"Unknown embedding layer with name {full_name}" )
trace.append('''weight''' )
__UpperCAmelCase : List[Any] = getattr(a__ , '''weight''' )
elif m_name == "_attention_layer":
# self-attention layer
trace.extend(['''attention''', '''self'''] )
__UpperCAmelCase : List[str] = getattr(a__ , '''attention''' )
__UpperCAmelCase : List[Any] = getattr(a__ , '''self''' )
elif m_name == "_attention_layer_norm":
# output attention norm
trace.extend(['''attention''', '''output''', '''LayerNorm'''] )
__UpperCAmelCase : Dict = getattr(a__ , '''attention''' )
__UpperCAmelCase : List[Any] = getattr(a__ , '''output''' )
__UpperCAmelCase : Tuple = getattr(a__ , '''LayerNorm''' )
elif m_name == "_attention_output_dense":
# output attention dense
trace.extend(['''attention''', '''output''', '''dense'''] )
__UpperCAmelCase : int = getattr(a__ , '''attention''' )
__UpperCAmelCase : str = getattr(a__ , '''output''' )
__UpperCAmelCase : Union[str, Any] = getattr(a__ , '''dense''' )
elif m_name == "_output_dense":
# output dense
trace.extend(['''output''', '''dense'''] )
__UpperCAmelCase : Optional[Any] = getattr(a__ , '''output''' )
__UpperCAmelCase : str = getattr(a__ , '''dense''' )
elif m_name == "_output_layer_norm":
# output dense
trace.extend(['''output''', '''LayerNorm'''] )
__UpperCAmelCase : int = getattr(a__ , '''output''' )
__UpperCAmelCase : Optional[Any] = getattr(a__ , '''LayerNorm''' )
elif m_name == "_key_dense":
# attention key
trace.append('''key''' )
__UpperCAmelCase : Tuple = getattr(a__ , '''key''' )
elif m_name == "_query_dense":
# attention query
trace.append('''query''' )
__UpperCAmelCase : Optional[int] = getattr(a__ , '''query''' )
elif m_name == "_value_dense":
# attention value
trace.append('''value''' )
__UpperCAmelCase : Optional[Any] = getattr(a__ , '''value''' )
elif m_name == "_intermediate_dense":
# attention intermediate dense
trace.extend(['''intermediate''', '''dense'''] )
__UpperCAmelCase : List[Any] = getattr(a__ , '''intermediate''' )
__UpperCAmelCase : List[str] = getattr(a__ , '''dense''' )
elif m_name == "_output_layer_norm":
# output layer norm
trace.append('''output''' )
__UpperCAmelCase : Optional[int] = getattr(a__ , '''output''' )
# weights & biases
elif m_name in ["bias", "beta"]:
trace.append('''bias''' )
__UpperCAmelCase : List[str] = getattr(a__ , '''bias''' )
elif m_name in ["kernel", "gamma"]:
trace.append('''weight''' )
__UpperCAmelCase : Tuple = getattr(a__ , '''weight''' )
else:
logger.warning(f"Ignored {m_name}" )
# for certain layers reshape is necessary
__UpperCAmelCase : Optional[Any] = '.'.join(a__ )
if re.match(r'''(\S+)\.attention\.self\.(key|value|query)\.(bias|weight)''' , a__ ) or re.match(
r'''(\S+)\.attention\.output\.dense\.weight''' , a__ ):
__UpperCAmelCase : Optional[int] = array.reshape(pointer.data.shape )
if "kernel" in full_name:
__UpperCAmelCase : Optional[Any] = array.transpose()
if pointer.shape == array.shape:
__UpperCAmelCase : Tuple = torch.from_numpy(a__ )
else:
raise ValueError(
f"Shape mismatch in layer {full_name}: Model expects shape {pointer.shape} but layer contains shape:"
f" {array.shape}" )
logger.info(f"Successfully set variable {full_name} to PyTorch layer {trace}" )
return model
def convert_tf2_checkpoint_to_pytorch(tf_checkpoint_path, config_path, pytorch_dump_path):
    '''simple docstring'''
    logger.info(f"Loading model based on config from {config_path}...")
    config = BertConfig.from_json_file(config_path)
    model = BertModel(config)

    # Load weights from checkpoint
    logger.info(f"Loading weights from checkpoint {tf_checkpoint_path}...")
    load_tf2_weights_in_bert(model, tf_checkpoint_path, config)

    # Save pytorch-model
    logger.info(f"Saving PyTorch model to {pytorch_dump_path}...")
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--tf_checkpoint_path", type=str, required=True, help="Path to the TensorFlow 2.x checkpoint path."
    )
    parser.add_argument(
        "--bert_config_file",
        type=str,
        required=True,
        help="The config json file corresponding to the BERT model. This specifies the model architecture.",
    )
    parser.add_argument(
        "--pytorch_dump_path",
        type=str,
        required=True,
        help="Path to the output PyTorch model (must include filename).",
    )
    args = parser.parse_args()
    convert_tf2_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
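# Example invocation (a sketch; all three paths and the script file name are
# hypothetical):
#   python convert_bert_original_tf2_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path ./tf2_model/bert_model.ckpt \
#       --bert_config_file ./tf2_model/bert_config.json \
#       --pytorch_dump_path ./pytorch_model.bin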
"""simple docstring"""
import logging
import os
import sys
from dataclasses import dataclass, field
from itertools import chain
from typing import Optional, Union
import datasets
import numpy as np
import torch
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
HfArgumentParser,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import PaddingStrategy, check_min_version, send_example_telemetry
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('4.31.0')
logger = logging.getLogger(__name__)
@dataclass
class ModelArguments:
lowercase_ : str = field(
metadata={"""help""": """Path to pretrained model or model identifier from huggingface.co/models"""} )
lowercase_ : Optional[str] = field(
default=UpperCamelCase , metadata={"""help""": """Pretrained config name or path if not the same as model_name"""} )
lowercase_ : Optional[str] = field(
default=UpperCamelCase , metadata={"""help""": """Pretrained tokenizer name or path if not the same as model_name"""} )
lowercase_ : Optional[str] = field(
default=UpperCamelCase , metadata={"""help""": """Where do you want to store the pretrained models downloaded from huggingface.co"""} , )
lowercase_ : bool = field(
default=UpperCamelCase , metadata={"""help""": """Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."""} , )
lowercase_ : str = field(
default="""main""" , metadata={"""help""": """The specific model version to use (can be a branch name, tag name or commit id)."""} , )
lowercase_ : bool = field(
default=UpperCamelCase , metadata={
"""help""": (
"""Will use the token generated when running `huggingface-cli login` (necessary to use this script """
"""with private models)."""
)
} , )
@dataclass
class DataTrainingArguments:
lowercase_ : Optional[str] = field(default=UpperCamelCase , metadata={"""help""": """The input training data file (a text file)."""} )
lowercase_ : Optional[str] = field(
default=UpperCamelCase , metadata={"""help""": """An optional input evaluation data file to evaluate the perplexity on (a text file)."""} , )
lowercase_ : bool = field(
default=UpperCamelCase , metadata={"""help""": """Overwrite the cached training and evaluation sets"""} )
lowercase_ : Optional[int] = field(
default=UpperCamelCase , metadata={"""help""": """The number of processes to use for the preprocessing."""} , )
lowercase_ : Optional[int] = field(
default=UpperCamelCase , metadata={
"""help""": (
"""The maximum total input sequence length after tokenization. If passed, sequences longer """
"""than this will be truncated, sequences shorter will be padded."""
)
} , )
lowercase_ : bool = field(
default=UpperCamelCase , metadata={
"""help""": (
"""Whether to pad all samples to the maximum sentence length. """
"""If False, will pad the samples dynamically when batching to the maximum length in the batch. More """
"""efficient on GPU but very bad for TPU."""
)
} , )
lowercase_ : Optional[int] = field(
default=UpperCamelCase , metadata={
"""help""": (
"""For debugging purposes or quicker training, truncate the number of training examples to this """
"""value if set."""
)
} , )
lowercase_ : Optional[int] = field(
default=UpperCamelCase , metadata={
"""help""": (
"""For debugging purposes or quicker training, truncate the number of evaluation examples to this """
"""value if set."""
)
} , )
    def __post_init__(self):
        if self.train_file is not None:
            extension = self.train_file.split('.' )[-1]
            assert extension in ["csv", "json"], "`train_file` should be a csv or a json file."
        if self.validation_file is not None:
            extension = self.validation_file.split('.' )[-1]
            assert extension in ["csv", "json"], "`validation_file` should be a csv or a json file."
@dataclass
class DataCollatorForMultipleChoice:
lowercase_ : PreTrainedTokenizerBase
lowercase_ : Union[bool, str, PaddingStrategy] = True
lowercase_ : Optional[int] = None
lowercase_ : Optional[int] = None
    def __call__(self, features):
        label_name = 'label' if 'label' in features[0].keys() else 'labels'
        labels = [feature.pop(label_name) for feature in features]
        batch_size = len(features)
        num_choices = len(features[0]['input_ids'])
        flattened_features = [
            [{k: v[i] for k, v in feature.items()} for i in range(num_choices)] for feature in features
        ]
        flattened_features = list(chain(*flattened_features))

        batch = self.tokenizer.pad(
            flattened_features, padding=self.padding, max_length=self.max_length, pad_to_multiple_of=self.pad_to_multiple_of, return_tensors='pt', )

        # Un-flatten
        batch = {k: v.view(batch_size, num_choices, -1) for k, v in batch.items()}
        # Add back labels
        batch['labels'] = torch.tensor(labels, dtype=torch.int64)
        return batch
def main():
'''simple docstring'''
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
    if len(sys.argv ) == 2 and sys.argv[1].endswith('.json' ):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry('run_swag' , model_args , data_args )
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level )
    datasets.utils.logging.set_verbosity(log_level )
    transformers.utils.logging.set_verbosity(log_level )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
F"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"""
        + F"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fp16}""" )
logger.info(F"""Training/evaluation parameters {training_args}""" )
# Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F"""Output directory ({training_args.output_dir}) already exists and is not empty. """
'Use --overwrite_output_dir to overcome.' )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """
'the `--output_dir` or add `--overwrite_output_dir` to train from scratch.' )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
if data_args.train_file is not None or data_args.validation_file is not None:
lowerCAmelCase :Union[str, Any] = {}
if data_args.train_file is not None:
lowerCAmelCase :Optional[Any] = data_args.train_file
if data_args.validation_file is not None:
lowerCAmelCase :str = data_args.validation_file
lowerCAmelCase :List[str] = data_args.train_file.split('.' )[-1]
lowerCAmelCase :Optional[int] = load_dataset(
a__ , data_files=a__ , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
else:
# Downloading and loading the swag dataset from the hub.
lowerCAmelCase :Dict = load_dataset(
'swag' , 'regular' , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Load pretrained model and tokenizer
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
lowerCAmelCase :Union[str, Any] = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
lowerCAmelCase :Union[str, Any] = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast_tokenizer , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
lowerCAmelCase :List[str] = AutoModelForMultipleChoice.from_pretrained(
model_args.model_name_or_path , from_tf=bool('.ckpt' in model_args.model_name_or_path ) , config=a__ , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# When using your own dataset or a different dataset from swag, you will probably need to change this.
    ending_names = [F"""ending{i}""" for i in range(4 )]
    context_name = 'sent1'
    question_header_name = 'sent2'
if data_args.max_seq_length is None:
lowerCAmelCase :Union[str, Any] = tokenizer.model_max_length
if max_seq_length > 10_24:
logger.warning(
'The chosen tokenizer supports a `model_max_length` that is longer than the default `block_size` value'
' of 1024. If you would like to use a longer `block_size` up to `tokenizer.model_max_length` you can'
' override this default with `--block_size xxx`.' )
lowerCAmelCase :Tuple = 10_24
else:
if data_args.max_seq_length > tokenizer.model_max_length:
logger.warning(
F"""The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the"""
F"""model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.""" )
lowerCAmelCase :str = min(data_args.max_seq_length , tokenizer.model_max_length )
# Preprocessing the datasets.
def preprocess_function(a__ ):
lowerCAmelCase :int = [[context] * 4 for context in examples[context_name]]
lowerCAmelCase :List[str] = examples[question_header_name]
lowerCAmelCase :Tuple = [
[F"""{header} {examples[end][i]}""" for end in ending_names] for i, header in enumerate(a__ )
]
# Flatten out
lowerCAmelCase :Any = list(chain(*a__ ) )
lowerCAmelCase :Dict = list(chain(*a__ ) )
# Tokenize
lowerCAmelCase :str = tokenizer(
a__ , a__ , truncation=a__ , max_length=a__ , padding='max_length' if data_args.pad_to_max_length else False , )
# Un-flatten
return {k: [v[i : i + 4] for i in range(0 , len(a__ ) , 4 )] for k, v in tokenized_examples.items()}
if training_args.do_train:
if "train" not in raw_datasets:
raise ValueError('--do_train requires a train dataset' )
lowerCAmelCase :Optional[int] = raw_datasets['train']
if data_args.max_train_samples is not None:
lowerCAmelCase :int = min(len(a__ ) , data_args.max_train_samples )
lowerCAmelCase :int = train_dataset.select(range(a__ ) )
with training_args.main_process_first(desc='train dataset map pre-processing' ):
lowerCAmelCase :Dict = train_dataset.map(
a__ , batched=a__ , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , )
if training_args.do_eval:
if "validation" not in raw_datasets:
raise ValueError('--do_eval requires a validation dataset' )
lowerCAmelCase :Tuple = raw_datasets['validation']
if data_args.max_eval_samples is not None:
lowerCAmelCase :Any = min(len(a__ ) , data_args.max_eval_samples )
lowerCAmelCase :str = eval_dataset.select(range(a__ ) )
with training_args.main_process_first(desc='validation dataset map pre-processing' ):
lowerCAmelCase :List[str] = eval_dataset.map(
a__ , batched=a__ , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , )
# Data collator
    data_collator = (
        default_data_collator
        if data_args.pad_to_max_length
        else DataCollatorForMultipleChoice(tokenizer=tokenizer , pad_to_multiple_of=8 if training_args.fp16 else None )
    )
# Metric
    def compute_metrics(eval_predictions ):
        preds, label_ids = eval_predictions
        preds = np.argmax(preds , axis=1 )
        return {"accuracy": (preds == label_ids).astype(np.float32 ).mean().item()}
# Initialize our Trainer
lowerCAmelCase :List[str] = Trainer(
model=a__ , args=a__ , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , tokenizer=a__ , data_collator=a__ , compute_metrics=a__ , )
# Training
if training_args.do_train:
lowerCAmelCase :Optional[Any] = None
if training_args.resume_from_checkpoint is not None:
lowerCAmelCase :Tuple = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
lowerCAmelCase :Union[str, Any] = last_checkpoint
lowerCAmelCase :Optional[int] = trainer.train(resume_from_checkpoint=a__ )
trainer.save_model() # Saves the tokenizer too for easy upload
lowerCAmelCase :Tuple = train_result.metrics
lowerCAmelCase :Union[str, Any] = (
data_args.max_train_samples if data_args.max_train_samples is not None else len(a__ )
)
lowerCAmelCase :List[Any] = min(a__ , len(a__ ) )
trainer.log_metrics('train' , a__ )
trainer.save_metrics('train' , a__ )
trainer.save_state()
# Evaluation
if training_args.do_eval:
logger.info('*** Evaluate ***' )
lowerCAmelCase :Dict = trainer.evaluate()
lowerCAmelCase :Any = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(a__ )
lowerCAmelCase :Tuple = min(a__ , len(a__ ) )
trainer.log_metrics('eval' , a__ )
trainer.save_metrics('eval' , a__ )
lowerCAmelCase :List[str] = {
'finetuned_from': model_args.model_name_or_path,
'tasks': 'multiple-choice',
'dataset_tags': 'swag',
'dataset_args': 'regular',
'dataset': 'SWAG',
'language': 'en',
}
if training_args.push_to_hub:
trainer.push_to_hub(**a__ )
else:
trainer.create_model_card(**a__ )
def _mp_fn(index ):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
    main()
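# Example invocation (a sketch; the hyper-parameters are illustrative, not tuned):
#   python run_swag.py \
#       --model_name_or_path bert-base-uncased \
#       --do_train --do_eval \
#       --per_device_train_batch_size 16 \
#       --learning_rate 5e-5 \
#       --num_train_epochs 3 \
#       --output_dir ./swag_output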
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
UpperCamelCase : int = logging.get_logger(__name__)
UpperCamelCase : List[str] = {
"""sail/poolformer_s12""": """https://huggingface.co/sail/poolformer_s12/resolve/main/config.json""",
# See all PoolFormer models at https://huggingface.co/models?filter=poolformer
}
class PoolFormerConfig(PretrainedConfig):
    """simple docstring"""
    model_type = 'poolformer'

    def __init__(self, num_channels=3, patch_size=16, stride=16, pool_size=3, mlp_ratio=4.0, depths=[2, 2, 6, 2], hidden_sizes=[64, 128, 320, 512], patch_sizes=[7, 3, 3, 3], strides=[4, 2, 2, 2], padding=[2, 1, 1, 1], num_encoder_blocks=4, drop_path_rate=0.0, hidden_act="gelu", use_layer_scale=True, layer_scale_init_value=1e-5, initializer_range=0.02, **kwargs, ):
        """simple docstring"""
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.stride = stride
        self.padding = padding
        self.pool_size = pool_size
        self.hidden_sizes = hidden_sizes
        self.mlp_ratio = mlp_ratio
        self.depths = depths
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.num_encoder_blocks = num_encoder_blocks
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_layer_scale = use_layer_scale
        self.layer_scale_init_value = layer_scale_init_value
        self.initializer_range = initializer_range
        super().__init__(**kwargs)
class PoolFormerOnnxConfig(OnnxConfig):
    """simple docstring"""
    torch_onnx_minimum_version = version.parse("1.11" )

    @property
    def inputs(self):
        """simple docstring"""
        return OrderedDict(
            [
                ('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
            ])

    @property
    def atol_for_validation(self):
        """simple docstring"""
        return 2e-3
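# Usage sketch: an OnnxConfig like the one above is what `transformers.onnx`
# consumes when exporting a PoolFormer checkpoint, e.g. (illustrative command):
#   python -m transformers.onnx --model=sail/poolformer_s12 poolformer_onnx/
# The 2e-3 returned by atol_for_validation is the absolute tolerance used when
# comparing the exported graph's outputs against the PyTorch outputs.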
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
_import_structure = {
    "configuration_longt5": ["LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP", "LongT5Config", "LongT5OnnxConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_longt5"] = [
        "LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST",
        "LongT5EncoderModel",
        "LongT5ForConditionalGeneration",
        "LongT5Model",
        "LongT5PreTrainedModel",
    ]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_longt5"] = [
        "FlaxLongT5ForConditionalGeneration",
        "FlaxLongT5Model",
        "FlaxLongT5PreTrainedModel",
    ]
if TYPE_CHECKING:
    from .configuration_longt5 import LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP, LongT5Config, LongT5OnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_longt5 import (
            LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST,
            LongT5EncoderModel,
            LongT5ForConditionalGeneration,
            LongT5Model,
            LongT5PreTrainedModel,
        )
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_flax_longt5 import (
            FlaxLongT5ForConditionalGeneration,
            FlaxLongT5Model,
            FlaxLongT5PreTrainedModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
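
# Note (added): replacing `sys.modules[__name__]` with a `_LazyModule` defers the heavy
# torch/flax imports until an attribute such as `LongT5Model` is first accessed, so
# `from transformers.models.longt5 import LongT5Config` stays cheap at import time.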
import inspect
import math
import tempfile
import unittest
import numpy as np
from transformers import ViTMAEConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTMAEForPreTraining, ViTMAEModel
from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class ViTMAEModelTester:
    def __init__(
        self, parent, batch_size=13, image_size=30, patch_size=2, num_channels=3, is_training=True,
        use_labels=True, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37,
        hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10, initializer_range=0.02, num_labels=3, mask_ratio=0.6, scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.mask_ratio = mask_ratio
        self.scope = scope

        # in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above
        # (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = int(math.ceil((1 - mask_ratio) * (num_patches + 1)))
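        # Worked example (added for illustration): with image_size=30, patch_size=2 and
        # mask_ratio=0.6, num_patches = (30 // 2) ** 2 = 225 and seq_length = ceil(0.4 * 226) = 91.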
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()
        return config, pixel_values, labels
    def get_config(self):
return ViTMAEConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=snake_case_ , initializer_range=self.initializer_range , mask_ratio=self.mask_ratio , )
    def create_and_check_model(self, config, pixel_values, labels):
        model = ViTMAEModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_for_pretraining(self, config, pixel_values, labels):
        model = ViTMAEForPreTraining(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        num_patches = (self.image_size // self.patch_size) ** 2
        expected_num_channels = self.patch_size**2 * self.num_channels
        self.parent.assertEqual(result.logits.shape, (self.batch_size, num_patches, expected_num_channels))

        # test greyscale images
        config.num_channels = 1
        model = ViTMAEForPreTraining(config)
        model.to(torch_device)
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        expected_num_channels = self.patch_size**2
        self.parent.assertEqual(result.logits.shape, (self.batch_size, num_patches, expected_num_channels))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class ViTMAEModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """
    Here we also overwrite some of the tests of test_modeling_common.py, as ViTMAE
    does not use input_ids, inputs_embeds, attention_mask and seq_length.
    """

    all_model_classes = (ViTMAEModel, ViTMAEForPreTraining) if is_torch_available() else ()
    pipeline_model_mapping = {"feature-extraction": ViTMAEModel} if is_torch_available() else {}

    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = ViTMAEModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTMAEConfig, has_text_modality=False, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()
@unittest.skip(reason='ViTMAE does not use inputs_embeds' )
    def test_inputs_embeds(self):
pass
    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs)
    def check_pt_tf_models(self, tf_model, pt_model, pt_inputs_dict):
        # make masks reproducible
        np.random.seed(2)

        num_patches = int((pt_model.config.image_size // pt_model.config.patch_size) ** 2)
        noise = np.random.uniform(size=(self.model_tester.batch_size, num_patches))
        pt_noise = torch.from_numpy(noise)

        # Add `noise` argument.
        # PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument
        pt_inputs_dict["noise"] = pt_noise

        super().check_pt_tf_models(tf_model, pt_model, pt_inputs_dict)
    def test_save_load(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            # make random mask reproducible
            torch.manual_seed(2)
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            out_2 = outputs[0].cpu().numpy()
            out_2[np.isnan(out_2)] = 0

            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname)
                model = model_class.from_pretrained(tmpdirname)
                model.to(torch_device)
                # make random mask reproducible
                torch.manual_seed(2)
                with torch.no_grad():
                    after_outputs = model(**self._prepare_for_class(inputs_dict, model_class))

                # Make sure we don't have nans
                out_1 = after_outputs[0].cpu().numpy()
                out_1[np.isnan(out_1)] = 0
                max_diff = np.amax(np.abs(out_1 - out_2))
                self.assertLessEqual(max_diff, 1e-5)
@unittest.skip(
reason='ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load\n to get deterministic results.' )
    def test_determinism(self):
pass
@unittest.skip(
reason='ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load\n to get deterministic results.' )
    def test_save_load_fast_init_from_base(self):
pass
@unittest.skip(
reason='ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load\n to get deterministic results.' )
    def test_save_load_fast_init_to_base(self):
pass
@unittest.skip(reason='ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load' )
    def test_model_outputs_equivalence(self):
pass
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
    def test_model_is_small(self):
pass
@slow
    def test_model_from_pretrained(self):
        for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ViTMAEModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class ViTMAEModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return ViTImageProcessor.from_pretrained("facebook/vit-mae-base") if is_vision_available() else None
@slow
    def test_inference_for_pretraining(self):
        # make random mask reproducible across the PT and TF model
        np.random.seed(2)

        model = ViTMAEForPreTraining.from_pretrained("facebook/vit-mae-base").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # prepare a noise vector that will be also used for testing the TF model
        # (this way we can ensure that the PT and TF models operate on the same inputs)
        vit_mae_config = ViTMAEConfig()
        num_patches = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2)
        noise = np.random.uniform(size=(1, num_patches))

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs, noise=torch.from_numpy(noise).to(device=torch_device))

        # verify the logits
        expected_shape = torch.Size((1, 196, 768))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [[-0.0548, -1.7023, -0.9325], [0.3721, -0.5670, -0.2233], [0.8235, -1.3878, -0.3524]])

        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3], expected_slice.to(torch_device), atol=1e-4))
from dataclasses import dataclass, field
from typing import Tuple
from ..utils import cached_property, is_torch_available, is_torch_tpu_available, logging, requires_backends
from .benchmark_args_utils import BenchmarkArguments
if is_torch_available():
import torch
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
logger = logging.get_logger(__name__)
@dataclass
class PyTorchBenchmarkArguments(BenchmarkArguments):
    deprecated_args = [
        "no_inference",
        "no_cuda",
        "no_tpu",
        "no_speed",
        "no_memory",
        "no_env_print",
        "no_multi_process",
    ]
    def __init__(self, **kwargs):
        """
        This __init__ is there for legacy code. When removing deprecated args completely,
        the class can simply be deleted.
        """
        for deprecated_arg in self.deprecated_args:
            if deprecated_arg in kwargs:
                positive_arg = deprecated_arg[3:]
                setattr(self, positive_arg, not kwargs.pop(deprecated_arg))
                logger.warning(
                    f"{deprecated_arg} is depreciated. Please use --no_{positive_arg} or"
                    f" {positive_arg}={kwargs[positive_arg]}")
        self.torchscript = kwargs.pop("torchscript", self.torchscript)
        self.torch_xla_tpu_print_metrics = kwargs.pop("torch_xla_tpu_print_metrics", self.torch_xla_tpu_print_metrics)
        self.fp16_opt_level = kwargs.pop("fp16_opt_level", self.fp16_opt_level)
        super().__init__(**kwargs)
    torchscript: bool = field(default=False, metadata={"help": "Trace the models using torchscript"})
    torch_xla_tpu_print_metrics: bool = field(default=False, metadata={"help": "Print Xla/PyTorch tpu metrics"})
    fp16_opt_level: str = field(
        default="O1",
        metadata={
            "help": (
                "For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']. "
                "See details at https://nvidia.github.io/apex/amp.html"
            )
        },
    )
    @cached_property
    def _setup_devices(self) -> Tuple["torch.device", int]:
        requires_backends(self, ["torch"])
        logger.info("PyTorch: setting up devices")
        if not self.cuda:
            device = torch.device("cpu")
            n_gpu = 0
        elif is_torch_tpu_available():
            device = xm.xla_device()
            n_gpu = 0
        else:
            device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
            n_gpu = torch.cuda.device_count()
        return device, n_gpu
    @property
    def is_tpu(self):
        return is_torch_tpu_available() and self.tpu

    @property
    def device_idx(self) -> int:
        requires_backends(self, ["torch"])
        # TODO(PVP): currently only single GPU is supported
        return torch.cuda.current_device()

    @property
    def device(self) -> "torch.device":
        requires_backends(self, ["torch"])
        return self._setup_devices[0]

    @property
    def n_gpu(self):
        requires_backends(self, ["torch"])
        return self._setup_devices[1]

    @property
    def is_gpu(self):
        return self.n_gpu > 0
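
# Usage sketch (added for illustration; assumes the remaining fields inherited from
# BenchmarkArguments, e.g. `models`, `batch_sizes`, `sequence_lengths`):
#   args = PyTorchBenchmarkArguments(models=["bert-base-uncased"], batch_sizes=[8], sequence_lengths=[128])
#   args.device, args.n_gpu  # resolved once and cached via `_setup_devices`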
import re


def indian_phone_validator(phone: str) -> bool:
    """Determine whether the given string is a valid Indian phone number."""
    pat = re.compile(r"^(\+91[\-\s]?)?[0]?(91)?[789]\d{9}$")
    if match := re.search(pat, phone):
        return match.string == phone
    return False
if __name__ == "__main__":
print(indian_phone_validator('''+918827897895'''))
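    # A couple of extra illustrative checks (added, not part of the original):
    print(indian_phone_validator("9876543210"))  # True: bare 10-digit mobile number
    print(indian_phone_validator("+91 123456789"))  # False: too short and bad leading digit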
import unittest
from pathlib import Path
from tempfile import NamedTemporaryFile, TemporaryDirectory
from transformers import BertConfig, BertTokenizerFast, FeatureExtractionPipeline
from transformers.convert_graph_to_onnx import (
convert,
ensure_valid_input,
generate_identified_filename,
infer_shapes,
quantize,
)
from transformers.testing_utils import require_tf, require_tokenizers, require_torch, slow
class FuncContiguousArgs:
    def forward(self, input_ids, token_type_ids, attention_mask):
        return None


class FuncNonContiguousArgs:
    def forward(self, input_ids, some_other_args, token_type_ids, attention_mask):
        return None


class OnnxExportTestCase(unittest.TestCase):
    MODEL_TO_TEST = [
        # (model_name, model_kwargs)
        ("bert-base-cased", {}),
        ("gpt2", {"use_cache": False}),  # We don't support exporting GPT2 past keys anymore
    ]
    @require_tf
    @slow
    def test_export_tensorflow(self):
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            self._test_export(model, "tf", 12, **model_kwargs)

    @require_torch
    @slow
    def test_export_pytorch(self):
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            self._test_export(model, "pt", 12, **model_kwargs)
    @require_torch
    @slow
    def test_export_custom_bert_model(self):
        from transformers import BertModel

        vocab = ["[UNK]", "[SEP]", "[CLS]", "[PAD]", "[MASK]", "some", "other", "words"]
        with NamedTemporaryFile(mode="w+t") as vocab_file:
            vocab_file.write("\n".join(vocab))
            vocab_file.flush()
            tokenizer = BertTokenizerFast(vocab_file.name)

            with TemporaryDirectory() as bert_save_dir:
                model = BertModel(BertConfig(vocab_size=len(vocab)))
                model.save_pretrained(bert_save_dir)
                self._test_export(bert_save_dir, "pt", 12, tokenizer)
    @require_tf
    @slow
    def test_quantize_tf(self):
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            path = self._test_export(model, "tf", 12, **model_kwargs)
            quantized_path = quantize(Path(path))

            # Ensure the actual quantized model is not bigger than the original one
            if quantized_path.stat().st_size >= Path(path).stat().st_size:
                self.fail("Quantized model is bigger than initial ONNX model")

    @require_torch
    @slow
    def test_quantize_pytorch(self):
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            path = self._test_export(model, "pt", 12, **model_kwargs)
            quantized_path = quantize(path)

            # Ensure the actual quantized model is not bigger than the original one
            if quantized_path.stat().st_size >= Path(path).stat().st_size:
                self.fail("Quantized model is bigger than initial ONNX model")
    def _test_export(self, model, framework, opset, tokenizer=None, **model_kwargs):
        try:
            # Compute path
            with TemporaryDirectory() as tempdir:
                path = Path(tempdir).joinpath("model.onnx")

            # Remove folder if exists
            if path.parent.exists():
                path.parent.rmdir()

            # Export
            convert(framework, model, path, opset, tokenizer, **model_kwargs)

            return path
        except Exception as e:
            self.fail(e)
    @require_torch
    @require_tokenizers
    @slow
    def test_infer_dynamic_axis_pytorch(self):
        from transformers import BertModel

        model = BertModel(BertConfig.from_pretrained("lysandre/tiny-bert-random"))
        tokenizer = BertTokenizerFast.from_pretrained("lysandre/tiny-bert-random")
        self._test_infer_dynamic_axis(model, tokenizer, "pt")

    @require_tf
    @require_tokenizers
    @slow
    def test_infer_dynamic_axis_tf(self):
        from transformers import TFBertModel

        model = TFBertModel(BertConfig.from_pretrained("lysandre/tiny-bert-random"))
        tokenizer = BertTokenizerFast.from_pretrained("lysandre/tiny-bert-random")
        self._test_infer_dynamic_axis(model, tokenizer, "tf")
    def _test_infer_dynamic_axis(self, model, tokenizer, framework):
        nlp = FeatureExtractionPipeline(model, tokenizer)

        variable_names = ["input_ids", "token_type_ids", "attention_mask", "output_0", "output_1"]
        input_vars, output_vars, shapes, tokens = infer_shapes(nlp, framework)

        # Assert all variables are present
        self.assertEqual(len(shapes), len(variable_names))
        self.assertTrue(all(var_name in shapes for var_name in variable_names))
        self.assertSequenceEqual(variable_names[:3], input_vars)
        self.assertSequenceEqual(variable_names[3:], output_vars)

        # Assert inputs are {0: batch, 1: sequence}
        for var_name in ["input_ids", "token_type_ids", "attention_mask"]:
            self.assertDictEqual(shapes[var_name], {0: "batch", 1: "sequence"})

        # Assert outputs are {0: batch, 1: sequence} and {0: batch}
        self.assertDictEqual(shapes["output_0"], {0: "batch", 1: "sequence"})
        self.assertDictEqual(shapes["output_1"], {0: "batch"})
    def test_ensure_valid_input(self):
        # All generated args are valid
        input_names = ["input_ids", "attention_mask", "token_type_ids"]
        tokens = {"input_ids": [1, 2, 3, 4], "attention_mask": [0, 0, 0, 0], "token_type_ids": [1, 1, 1, 1]}
        ordered_input_names, inputs_args = ensure_valid_input(FuncContiguousArgs(), tokens, input_names)

        # Should have exactly the same number of args (all are valid)
        self.assertEqual(len(inputs_args), 3)

        # Should have exactly the same input names
        self.assertEqual(set(ordered_input_names), set(input_names))

        # Parameter should be reordered according to their respective place in the function:
        # (input_ids, token_type_ids, attention_mask)
        self.assertEqual(inputs_args, (tokens["input_ids"], tokens["token_type_ids"], tokens["attention_mask"]))

        # Generated args are interleaved with another args (for instance parameter "past" in GPT2)
        ordered_input_names, inputs_args = ensure_valid_input(FuncNonContiguousArgs(), tokens, input_names)

        # Should have exactly the one arg (all before the one not provided "some_other_args")
        self.assertEqual(len(inputs_args), 1)
        self.assertEqual(len(ordered_input_names), 1)

        # Should have only "input_ids"
        self.assertEqual(inputs_args[0], tokens["input_ids"])
        self.assertEqual(ordered_input_names[0], "input_ids")
    def test_generate_identified_filename(self):
        generated = generate_identified_filename(Path("/home/something/my_fake_model.onnx"), "-test")
        self.assertEqual("/home/something/my_fake_model-test.onnx", generated.as_posix())
demo_graph = {
"""A""": ["""B""", """C""", """E"""],
"""B""": ["""A""", """D""", """E"""],
"""C""": ["""A""", """F""", """G"""],
"""D""": ["""B"""],
"""E""": ["""A""", """B""", """D"""],
"""F""": ["""C"""],
"""G""": ["""C"""],
}
def bfs_shortest_path(graph: dict, start, goal) -> list:
    """Find the shortest path between `start` and `goal` nodes, returned as a list of nodes."""
    # keep track of explored nodes
    explored = set()
    # keep track of all the paths to be checked
    queue = [[start]]

    # return path if start is goal
    if start == goal:
        return [start]

    # keeps looping until all possible paths have been checked
    while queue:
        # pop the first path from the queue
        path = queue.pop(0)
        # get the last node from the path
        node = path[-1]
        if node not in explored:
            neighbours = graph[node]
            # go through all neighbour nodes, construct a new path and
            # push it into the queue
            for neighbour in neighbours:
                new_path = list(path)
                new_path.append(neighbour)
                queue.append(new_path)
                # return path if neighbour is goal
                if neighbour == goal:
                    return new_path

            # mark node as explored
            explored.add(node)

    # in case there's no path between the 2 nodes
    return []
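
# Example (added for illustration): bfs_shortest_path(demo_graph, "G", "D") expands
# G -> C -> A -> B -> D and returns ['G', 'C', 'A', 'B', 'D']; see the demo under __main__.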
def bfs_shortest_path_distance(graph: dict, start, target) -> int:
    """Return the number of edges on the shortest path between `start` and `target`, or -1."""
    if not graph or start not in graph or target not in graph:
        return -1
    if start == target:
        return 0
    queue = [start]
    visited = set(start)
    # Keep tab on distances from `start` node.
    dist = {start: 0, target: -1}
    while queue:
        node = queue.pop(0)
        if node == target:
            dist[target] = (
                dist[node] if dist[target] == -1 else min(dist[target], dist[node])
            )
        for adjacent in graph[node]:
            if adjacent not in visited:
                visited.add(adjacent)
                queue.append(adjacent)
                dist[adjacent] = dist[node] + 1
    return dist[target]
if __name__ == "__main__":
print(bfs_shortest_path(demo_graph, """G""", """D""")) # returns ['G', 'C', 'A', 'B', 'D']
print(bfs_shortest_path_distance(demo_graph, """G""", """D""")) # returns 4
import collections
import inspect
import unittest
from transformers import Swinv2Config
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
    from transformers import Swinv2ForImageClassification, Swinv2ForMaskedImageModeling, Swinv2Model
    from transformers.models.swinv2.modeling_swinv2 import SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class Swinv2ModelTester:
    def __init__(
        self, parent, batch_size=13, image_size=32, patch_size=2, num_channels=3, embed_dim=16,
        depths=[1, 2, 1], num_heads=[2, 2, 4], window_size=2, mlp_ratio=2.0, qkv_bias=True,
        hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, drop_path_rate=0.1,
        hidden_act="gelu", use_absolute_embeddings=False, patch_norm=True, initializer_range=0.02,
        layer_norm_eps=1e-5, is_training=True, scope=None, use_labels=True,
        type_sequence_label_size=10, encoder_stride=8,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.patch_norm = patch_norm
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.is_training = is_training
        self.scope = scope
        self.use_labels = use_labels
        self.type_sequence_label_size = type_sequence_label_size
        self.encoder_stride = encoder_stride
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return Swinv2Config(
            image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels,
            embed_dim=self.embed_dim, depths=self.depths, num_heads=self.num_heads,
            window_size=self.window_size, mlp_ratio=self.mlp_ratio, qkv_bias=self.qkv_bias,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            drop_path_rate=self.drop_path_rate, hidden_act=self.hidden_act,
            use_absolute_embeddings=self.use_absolute_embeddings, patch_norm=self.patch_norm,
            layer_norm_eps=self.layer_norm_eps, initializer_range=self.initializer_range,
            encoder_stride=self.encoder_stride,
        )
    def create_and_check_model(self, config, pixel_values, labels):
        model = Swinv2Model(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        expected_seq_len = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths) - 1))
        expected_dim = int(config.embed_dim * 2 ** (len(config.depths) - 1))

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, expected_seq_len, expected_dim))
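        # Worked example (added for illustration): with image_size=32, patch_size=2, embed_dim=16
        # and len(depths)=3, expected_seq_len = 256 // 4**2 = 16 and expected_dim = 16 * 2**2 = 64.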
    def create_and_check_for_masked_image_modeling(self, config, pixel_values, labels):
        model = Swinv2ForMaskedImageModeling(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_channels, self.image_size, self.image_size))

        # test greyscale images
        config.num_channels = 1
        model = Swinv2ForMaskedImageModeling(config)
        model.to(torch_device)
        model.eval()

        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, 1, self.image_size, self.image_size))
    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = Swinv2ForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class Swinv2ModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (Swinv2Model, Swinv2ForImageClassification, Swinv2ForMaskedImageModeling) if is_torch_available() else ()
    )
    pipeline_model_mapping = (
        {"feature-extraction": Swinv2Model, "image-classification": Swinv2ForImageClassification}
        if is_torch_available()
        else {}
    )

    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = Swinv2ModelTester(self)
        self.config_tester = ConfigTester(self, config_class=Swinv2Config, embed_dim=37)
    def test_config(self):
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
@unittest.skip(reason='Got `CUDA error: misaligned address` with PyTorch 2.0.0.' )
    def test_multi_gpu_data_parallel_forward(self):
        pass
@unittest.skip(reason='Swinv2 does not use inputs_embeds' )
    def test_inputs_embeds(self):
        pass
    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = False
            config.return_dict = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.attentions
            expected_num_attentions = len(self.model_tester.depths)
            self.assertEqual(len(attentions), expected_num_attentions)

            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            window_size_squared = config.window_size**2
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.attentions
            self.assertEqual(len(attentions), expected_num_attentions)
            self.assertListEqual(
                list(attentions[0].shape[-3:]),
                [self.model_tester.num_heads[0], window_size_squared, window_size_squared],
            )
            out_len = len(outputs)

            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            if hasattr(self.model_tester, "num_hidden_states_types"):
                added_hidden_states = self.model_tester.num_hidden_states_types
            else:
                # also another +1 for reshaped_hidden_states
                added_hidden_states = 2
            self.assertEqual(out_len + added_hidden_states, len(outputs))

            self_attentions = outputs.attentions
            self.assertEqual(len(self_attentions), expected_num_attentions)
            self.assertListEqual(
                list(self_attentions[0].shape[-3:]),
                [self.model_tester.num_heads[0], window_size_squared, window_size_squared],
            )
    def check_hidden_states_output(self, inputs_dict, config, model_class, image_size):
        model = model_class(config)
        model.to(torch_device)
        model.eval()

        with torch.no_grad():
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))

        hidden_states = outputs.hidden_states

        expected_num_layers = getattr(
            self.model_tester, "expected_num_hidden_layers", len(self.model_tester.depths) + 1)
        self.assertEqual(len(hidden_states), expected_num_layers)

        # Swinv2 has a different seq_length
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size, collections.abc.Iterable)
            else (config.patch_size, config.patch_size)
        )

        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])

        self.assertListEqual(
            list(hidden_states[0].shape[-2:]), [num_patches, self.model_tester.embed_dim],
        )

        reshaped_hidden_states = outputs.reshaped_hidden_states
        self.assertEqual(len(reshaped_hidden_states), expected_num_layers)

        batch_size, num_channels, height, width = reshaped_hidden_states[0].shape
        reshaped_hidden_states = (
            reshaped_hidden_states[0].view(batch_size, num_channels, height * width).permute(0, 2, 1)
        )
        self.assertListEqual(
            list(reshaped_hidden_states.shape[-2:]), [num_patches, self.model_tester.embed_dim],
        )
    def test_hidden_states_output(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size, collections.abc.Iterable)
            else (self.model_tester.image_size, self.model_tester.image_size)
        )

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            self.check_hidden_states_output(inputs_dict, config, model_class, image_size)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            self.check_hidden_states_output(inputs_dict, config, model_class, image_size)
    def test_hidden_states_output_with_padding(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.patch_size = 3

        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size, collections.abc.Iterable)
            else (self.model_tester.image_size, self.model_tester.image_size)
        )
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size, collections.abc.Iterable)
            else (config.patch_size, config.patch_size)
        )

        padded_height = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
        padded_width = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            self.check_hidden_states_output(inputs_dict, config, model_class, (padded_height, padded_width))

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            self.check_hidden_states_output(inputs_dict, config, model_class, (padded_height, padded_width))
    def test_for_masked_image_modeling(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = Swinv2Model.from_pretrained(model_name)
            self.assertIsNotNone(model)
    def test_initialization(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        configs_no_init = _config_zero_init(config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            for name, param in model.named_parameters():
                if "embeddings" not in name and "logit_scale" not in name and param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item(),
                        [0.0, 1.0],
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )
@require_vision
@require_torch
class Swinv2ModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            AutoImageProcessor.from_pretrained("microsoft/swinv2-tiny-patch4-window8-256")
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = Swinv2ForImageClassification.from_pretrained("microsoft/swinv2-tiny-patch4-window8-256").to(
            torch_device)
        image_processor = self.default_image_processor

        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([-0.3947, -0.4306, 0.0026]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
import copy
from typing import TYPE_CHECKING, Any, Mapping, Optional, OrderedDict
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto.configuration_auto import AutoConfig
if TYPE_CHECKING:
from ... import PreTrainedTokenizerBase, TensorType
logger = logging.get_logger(__name__)


class VisionEncoderDecoderConfig(PretrainedConfig):
    model_type = "vision-encoder-decoder"
    is_composition = True
    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        if "encoder" not in kwargs or "decoder" not in kwargs:
            raise ValueError(
                f"A configuration of type {self.model_type} cannot be instantiated because "
                f"not both `encoder` and `decoder` sub-configurations are passed, but only {kwargs}")

        encoder_config = kwargs.pop("encoder")
        encoder_model_type = encoder_config.pop("model_type")
        decoder_config = kwargs.pop("decoder")
        decoder_model_type = decoder_config.pop("model_type")

        self.encoder = AutoConfig.for_model(encoder_model_type, **encoder_config)
        self.decoder = AutoConfig.for_model(decoder_model_type, **decoder_config)
        self.is_encoder_decoder = True
    @classmethod
    def from_encoder_decoder_configs(
        cls, encoder_config: PretrainedConfig, decoder_config: PretrainedConfig, **kwargs
    ) -> PretrainedConfig:
        logger.info("Setting `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config")
        decoder_config.is_decoder = True
        decoder_config.add_cross_attention = True

        return cls(encoder=encoder_config.to_dict(), decoder=decoder_config.to_dict(), **kwargs)
    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["encoder"] = self.encoder.to_dict()
        output["decoder"] = self.decoder.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
class VisionEncoderDecoderEncoderOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict({"last_hidden_state": {0: "batch", 1: "encoder_sequence"}})
class VisionEncoderDecoderDecoderOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict()
        common_inputs["input_ids"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
        common_inputs["attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
        common_inputs["encoder_hidden_states"] = {0: "batch", 1: "encoder_sequence"}
        return common_inputs

    def generate_dummy_inputs(
        self,
        tokenizer: "PreTrainedTokenizerBase",
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional["TensorType"] = None,
    ) -> Mapping[str, Any]:
        import torch

        common_inputs = OrderedDict()
        dummy_input = super().generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework)

        batch, encoder_sequence = dummy_input["input_ids"].shape
        encoder_hidden_states_shape = (batch, encoder_sequence, self._config.encoder_hidden_size)
        common_inputs["input_ids"] = dummy_input.pop("input_ids")
        common_inputs["attention_mask"] = dummy_input.pop("attention_mask")
        common_inputs["encoder_hidden_states"] = torch.zeros(encoder_hidden_states_shape)

        return common_inputs
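    # Note (added): the dummy `encoder_hidden_states` mirrors the (batch, sequence) shape of the
    # text dummy input, with `encoder_hidden_size` as the trailing dimension, so ONNX tracing
    # sees cross-attention inputs of the right rank.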
class VisionEncoderDecoderOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> None:
        pass

    def get_encoder_config(self, encoder_config: PretrainedConfig) -> OnnxConfig:
        return VisionEncoderDecoderEncoderOnnxConfig(encoder_config)

    def get_decoder_config(
        self, encoder_config: PretrainedConfig, decoder_config: PretrainedConfig, feature: str = "default"
    ) -> OnnxConfig:
        decoder_config.encoder_hidden_size = encoder_config.hidden_size
        return VisionEncoderDecoderDecoderOnnxConfig(decoder_config, feature)
import os
from typing import Dict, List, Tuple, TypeVar, Union
T = TypeVar("T")

ListLike = Union[List[T], Tuple[T, ...]]
NestedDataStructureLike = Union[T, List[T], Dict[str, T]]
PathLike = Union[str, bytes, os.PathLike]
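
# Illustrative values (added, not part of the original):
#   ListLike[int]                -> [1, 2] or (1, 2)
#   NestedDataStructureLike[str] -> "a", ["a", "b"], or {"col": "a"}
#   PathLike                     -> "data/train.csv" or pathlib.Path("data/train.csv")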
def is_pentagonal(n: int) -> bool:
    """Return True if `n` is pentagonal, i.e. n = m * (3 * m - 1) / 2 for some integer m."""
    root = (1 + 24 * n) ** 0.5
    return ((1 + root) / 6) % 1 == 0


def solution(limit: int = 5000) -> int:
    pentagonal_nums = [(i * (3 * i - 1)) // 2 for i in range(1, limit)]
    for i, pentagonal_i in enumerate(pentagonal_nums):
        for j in range(i, len(pentagonal_nums)):
            pentagonal_j = pentagonal_nums[j]
            a = pentagonal_i + pentagonal_j
            b = pentagonal_j - pentagonal_i
            if is_pentagonal(a) and is_pentagonal(b):
                return b
    return -1
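
# Worked example (added for illustration): P(4) = 22 and P(7) = 70 sum to 92 = P(8),
# but their difference 48 is not pentagonal, so is_pentagonal rejects that pair.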
if __name__ == "__main__":
print(f"{solution() = }")
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_torch_available, is_vision_available
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MgpstrProcessor, ViTImageProcessor
@require_torch
@require_vision
class MgpstrProcessorTest(unittest.TestCase):
    image_processing_class = ViTImageProcessor if is_vision_available() else None

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
    def setUp(self):
        self.image_size = (3, 32, 128)
        self.tmpdirname = tempfile.mkdtemp()

        # fmt: off
        vocab = ["[GO]", "[s]", "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")

        image_processor_map = {
            "do_normalize": False,
            "do_resize": True,
            "image_processor_type": "ViTImageProcessor",
            "resample": 3,
            "size": {"height": 32, "width": 128},
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)
    def get_tokenizer(self, **kwargs):
        return MgpstrTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return ViTImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """This function prepares a single PIL image."""
        image_input = np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)
        image_input = Image.fromarray(np.moveaxis(image_input, 0, -1))
        return image_input
    def test_save_load_pretrained_default(self):
        tokenizer = self.get_tokenizer()
        image_processor = self.get_image_processor()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)
        processor.save_pretrained(self.tmpdirname)
        processor = MgpstrProcessor.from_pretrained(self.tmpdirname, use_fast=False)

        self.assertEqual(processor.char_tokenizer.get_vocab(), tokenizer.get_vocab())
        self.assertIsInstance(processor.char_tokenizer, MgpstrTokenizer)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor.image_processor, ViTImageProcessor)
    def test_save_load_pretrained_additional_features(self):
        tokenizer = self.get_tokenizer()
        image_processor = self.get_image_processor()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)
        processor = MgpstrProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0)

        self.assertEqual(processor.char_tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.char_tokenizer, MgpstrTokenizer)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, ViTImageProcessor)
    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_image_proc = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_image_proc.keys():
            self.assertAlmostEqual(input_image_proc[key].sum(), input_processor[key].sum(), delta=1e-2)
    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "test"
        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])
    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "test"
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["pixel_values", "labels"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()
    def test_char_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predictions = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9], [3, 4, 3, 1, 1, 8, 9]]
        decoded_processor = processor.char_decode(predictions)
        decoded_tok = tokenizer.batch_decode(predictions)
        decode_strs = [seq.replace(" ", "") for seq in decoded_tok]

        self.assertListEqual(decoded_processor, decode_strs)
    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = None
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), processor.model_input_names)
    def test_batch_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)

        char_input = torch.randn(1, 27, 38)
        bpe_input = torch.randn(1, 27, 50257)
        wp_input = torch.randn(1, 27, 30522)

        results = processor.batch_decode([char_input, bpe_input, wp_input])

        self.assertListEqual(list(results.keys()), ["generated_text", "scores", "char_preds", "bpe_preds", "wp_preds"])
def least_divisible_repunit(divisor: int) -> int:
    """Return the least k such that the repunit R(k) = (10**k - 1) / 9 is divisible by `divisor`."""
    if divisor % 5 == 0 or divisor % 2 == 0:
        return 0
    repunit = 1
    repunit_index = 1
    # Track R(k) modulo `divisor` instead of the full repunit, which would grow huge.
    while repunit:
        repunit = (10 * repunit + 1) % divisor
        repunit_index += 1
    return repunit_index


def solution(limit: int = 1_000_000) -> int:
    """Return the least n (odd and coprime to 10) for which A(n) first exceeds `limit`."""
    divisor = limit - 1
    if divisor % 2 == 0:
        divisor += 1
    while least_divisible_repunit(divisor) <= limit:
        divisor += 2
    return divisor
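
# Example (added for illustration): least_divisible_repunit(7) == 6,
# since R(6) = 111111 = 7 * 15873.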
if __name__ == "__main__":
print(F"""{solution() = }""")
import json
import os
import shutil
import tempfile
import unittest
from multiprocessing import get_context
from pathlib import Path
import datasets
import numpy as np
from datasets import load_dataset
from parameterized import parameterized
from transformers import AutoProcessor
from transformers.models.wav2vec2 import Wav2Vec2CTCTokenizer, Wav2Vec2FeatureExtractor
from transformers.models.wav2vec2.tokenization_wav2vec2 import VOCAB_FILES_NAMES
from transformers.testing_utils import require_pyctcdecode, require_torch, require_torchaudio, slow
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_pyctcdecode_available, is_torch_available

from ..wav2vec2.test_feature_extraction_wav2vec2 import floats_list


if is_pyctcdecode_available():
    from huggingface_hub import snapshot_download
    from pyctcdecode import BeamSearchDecoderCTC

    from transformers.models.wav2vec2_with_lm import Wav2Vec2ProcessorWithLM
    from transformers.models.wav2vec2_with_lm.processing_wav2vec2_with_lm import Wav2Vec2DecoderWithLMOutput


if is_torch_available():
    from transformers import Wav2Vec2ForCTC
@require_pyctcdecode
class Wav2Vec2ProcessorWithLMTest(unittest.TestCase):
    def setUp(self):
        vocab = "| <pad> <unk> <s> </s> a b c d e f g h i j k".split()
        vocab_tokens = dict(zip(vocab, range(len(vocab))))

        self.add_kwargs_tokens_map = {
            "unk_token": "<unk>",
            "bos_token": "<s>",
            "eos_token": "</s>",
        }
        feature_extractor_map = {
            "feature_size": 1,
            "padding_value": 0.0,
            "sampling_rate": 16000,
            "return_attention_mask": False,
            "do_normalize": True,
        }

        self.tmpdirname = tempfile.mkdtemp()
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.feature_extraction_file = os.path.join(self.tmpdirname, FEATURE_EXTRACTOR_NAME)
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.feature_extraction_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(feature_extractor_map) + "\n")

        # load decoder from hub
        self.decoder_name = "hf-internal-testing/ngram-beam-search-decoder"
    def get_tokenizer(self, **kwargs_init):
        kwargs = self.add_kwargs_tokens_map.copy()
        kwargs.update(kwargs_init)
        return Wav2Vec2CTCTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_feature_extractor(self, **kwargs):
        return Wav2Vec2FeatureExtractor.from_pretrained(self.tmpdirname, **kwargs)

    def get_decoder(self, **kwargs):
        return BeamSearchDecoderCTC.load_from_hf_hub(self.decoder_name, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)
    def test_save_load_pretrained_default(self):
        tokenizer = self.get_tokenizer()
        feature_extractor = self.get_feature_extractor()
        decoder = self.get_decoder()

        processor = Wav2Vec2ProcessorWithLM(tokenizer=tokenizer, feature_extractor=feature_extractor, decoder=decoder)

        processor.save_pretrained(self.tmpdirname)
        processor = Wav2Vec2ProcessorWithLM.from_pretrained(self.tmpdirname)

        # tokenizer
        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab())
        self.assertIsInstance(processor.tokenizer, Wav2Vec2CTCTokenizer)

        # feature extractor
        self.assertEqual(processor.feature_extractor.to_json_string(), feature_extractor.to_json_string())
        self.assertIsInstance(processor.feature_extractor, Wav2Vec2FeatureExtractor)

        # decoder
        self.assertEqual(processor.decoder._alphabet.labels, decoder._alphabet.labels)
        self.assertEqual(
            processor.decoder.model_container[decoder._model_key]._unigram_set,
            decoder.model_container[decoder._model_key]._unigram_set,
        )
        self.assertIsInstance(processor.decoder, BeamSearchDecoderCTC)
    def test_save_load_pretrained_additional_features(self):
        processor = Wav2Vec2ProcessorWithLM(
            tokenizer=self.get_tokenizer(), feature_extractor=self.get_feature_extractor(), decoder=self.get_decoder())
        processor.save_pretrained(self.tmpdirname)

        # make sure that error is thrown when decoder alphabet doesn't match
        processor = Wav2Vec2ProcessorWithLM.from_pretrained(
            self.tmpdirname, alpha=5.0, beta=3.0, score_boundary=-7.0, unk_score_offset=3)

        # decoder
        self.assertEqual(processor.language_model.alpha, 5.0)
        self.assertEqual(processor.language_model.beta, 3.0)
        self.assertEqual(processor.language_model.score_boundary, -7.0)
        self.assertEqual(processor.language_model.unk_score_offset, 3)
    def test_decoder_tokenizer_mismatch_raises(self):
        tokenizer = self.get_tokenizer()
        # add token to trigger raise
        tokenizer.add_tokens(["xx"])
        with self.assertRaisesRegex(ValueError, "include"):
            Wav2Vec2ProcessorWithLM(
                tokenizer=tokenizer, feature_extractor=self.get_feature_extractor(), decoder=self.get_decoder())
    def test_feature_extractor(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        decoder = self.get_decoder()

        processor = Wav2Vec2ProcessorWithLM(tokenizer=tokenizer, feature_extractor=feature_extractor, decoder=decoder)

        raw_speech = floats_list((3, 1000))

        input_feat_extract = feature_extractor(raw_speech, return_tensors="np")
        input_processor = processor(raw_speech, return_tensors="np")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)
    def test_tokenizer(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        decoder = self.get_decoder()

        processor = Wav2Vec2ProcessorWithLM(tokenizer=tokenizer, feature_extractor=feature_extractor, decoder=decoder)

        input_str = "This is a test string"
        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])
    def _get_dummy_logits(self, shape=(2, 10, 16), seed=77):
        np.random.seed(seed)
        return np.random.rand(*shape)
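    # Note (added): the default shape (2, 10, 16) means a batch of 2 utterances, 10 time frames,
    # and a 16-way distribution matching the 16-token vocab built in setUp.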
    def test_decoder(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        decoder = self.get_decoder()

        processor = Wav2Vec2ProcessorWithLM(tokenizer=tokenizer, feature_extractor=feature_extractor, decoder=decoder)

        logits = self._get_dummy_logits(shape=(10, 16), seed=13)

        decoded_processor = processor.decode(logits)
        decoded_decoder = decoder.decode_beams(logits)[0]

        self.assertEqual(decoded_decoder[0], decoded_processor.text)
        self.assertEqual("</s> <s> </s>", decoded_processor.text)
        self.assertEqual(decoded_decoder[-2], decoded_processor.logit_score)
        self.assertEqual(decoded_decoder[-1], decoded_processor.lm_score)
    @parameterized.expand([[None], ["fork"], ["spawn"]])
    def test_decoder_batch(self, pool_context):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        decoder = self.get_decoder()

        processor = Wav2Vec2ProcessorWithLM(tokenizer=tokenizer, feature_extractor=feature_extractor, decoder=decoder)

        logits = self._get_dummy_logits()

        # note: pool should be instantiated *after* Wav2Vec2ProcessorWithLM.
        # otherwise, the LM won't be available to the pool's sub-processes.
        # manual logic used to allow parameterized test for both pool=None and pool=Pool(...)
        if pool_context is None:
            decoded_processor = processor.batch_decode(logits)
        else:
            with get_context(pool_context).Pool() as pool:
                decoded_processor = processor.batch_decode(logits, pool)

        logits_list = list(logits)

        with get_context("fork").Pool() as p:
            decoded_beams = decoder.decode_beams_batch(p, logits_list)

        texts_decoder, logit_scores_decoder, lm_scores_decoder = [], [], []
        for beams in decoded_beams:
            texts_decoder.append(beams[0][0])
            logit_scores_decoder.append(beams[0][-2])
            lm_scores_decoder.append(beams[0][-1])

        self.assertListEqual(texts_decoder, decoded_processor.text)
        self.assertListEqual(["<s> <s> </s>", "<s> <s> <s>"], decoded_processor.text)
        self.assertListEqual(logit_scores_decoder, decoded_processor.logit_score)
        self.assertListEqual(lm_scores_decoder, decoded_processor.lm_score)
    def test_decoder_with_params(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        decoder = self.get_decoder()
        processor = WavaVecaProcessorWithLM(tokenizer=tokenizer, feature_extractor=feature_extractor, decoder=decoder)
        logits = self._get_dummy_logits()
        beam_width = 15
        beam_prune_logp = -20.0
        token_min_logp = -4.0
        decoded_processor_out = processor.batch_decode(
            logits, beam_width=beam_width, beam_prune_logp=beam_prune_logp, token_min_logp=token_min_logp
        )
        decoded_processor = decoded_processor_out.text
        logits_list = list(logits)
        with get_context("fork").Pool() as pool:
            decoded_decoder_out = decoder.decode_beams_batch(
                pool, logits_list, beam_width=beam_width, beam_prune_logp=beam_prune_logp, token_min_logp=token_min_logp
            )
        decoded_decoder = [d[0][0] for d in decoded_decoder_out]
        logit_scores = [d[0][2] for d in decoded_decoder_out]
        lm_scores = [d[0][3] for d in decoded_decoder_out]
        self.assertListEqual(decoded_decoder, decoded_processor)
        self.assertListEqual(["</s> <s> <s>", "<s> <s> <s>"], decoded_processor)
        self.assertTrue(np.array_equal(logit_scores, decoded_processor_out.logit_score))
        self.assertTrue(np.allclose([-20.054, -18.447], logit_scores, atol=1e-3))
        self.assertTrue(np.array_equal(lm_scores, decoded_processor_out.lm_score))
        self.assertTrue(np.allclose([-15.554, -13.9474], lm_scores, atol=1e-3))
    def test_decoder_with_params_of_lm(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        decoder = self.get_decoder()
        processor = WavaVecaProcessorWithLM(tokenizer=tokenizer, feature_extractor=feature_extractor, decoder=decoder)
        logits = self._get_dummy_logits()
        alpha = 2.0
        beta = 5.0
        unk_score_offset = -20.0
        lm_score_boundary = True
        decoded_processor_out = processor.batch_decode(
            logits, alpha=alpha, beta=beta, unk_score_offset=unk_score_offset, lm_score_boundary=lm_score_boundary
        )
        decoded_processor = decoded_processor_out.text
        logits_list = list(logits)
        decoder.reset_params(
            alpha=alpha, beta=beta, unk_score_offset=unk_score_offset, lm_score_boundary=lm_score_boundary
        )
        with get_context("fork").Pool() as pool:
            decoded_decoder_out = decoder.decode_beams_batch(pool, logits_list)
        decoded_decoder = [d[0][0] for d in decoded_decoder_out]
        self.assertListEqual(decoded_decoder, decoded_processor)
        self.assertListEqual(["<s> </s> <s> </s> </s>", "</s> </s> <s> </s> </s>"], decoded_processor)
        lm_model = processor.decoder.model_container[processor.decoder._model_key]
        self.assertEqual(lm_model.alpha, 2.0)
        self.assertEqual(lm_model.beta, 5.0)
        self.assertEqual(lm_model.unk_score_offset, -20.0)
        self.assertEqual(lm_model.score_boundary, True)
    def test_decoder_download_ignores_files(self):
        processor = WavaVecaProcessorWithLM.from_pretrained("hf-internal-testing/processor_with_lm")
        language_model = processor.decoder.model_container[processor.decoder._model_key]
        path_to_cached_dir = Path(language_model._kenlm_model.path.decode("utf-8")).parent.parent.absolute()
        downloaded_decoder_files = os.listdir(path_to_cached_dir)
        expected_decoder_files = ["alphabet.json", "language_model"]
        downloaded_decoder_files.sort()
        expected_decoder_files.sort()
        # test that only decoder relevant files from
        # https://huggingface.co/hf-internal-testing/processor_with_lm/tree/main
        # are downloaded and none of the rest (e.g. README.md, ...)
        self.assertListEqual(downloaded_decoder_files, expected_decoder_files)
    def test_decoder_local_files(self):
        local_dir = snapshot_download("hf-internal-testing/processor_with_lm")
        processor = WavaVecaProcessorWithLM.from_pretrained(local_dir)
        language_model = processor.decoder.model_container[processor.decoder._model_key]
        path_to_cached_dir = Path(language_model._kenlm_model.path.decode("utf-8")).parent.parent.absolute()
        local_decoder_files = os.listdir(local_dir)
        expected_decoder_files = os.listdir(path_to_cached_dir)
        local_decoder_files.sort()
        expected_decoder_files.sort()
        # test that both decoder from hub and local files in cache are the same
        self.assertListEqual(local_decoder_files, expected_decoder_files)
    def test_processor_from_auto_processor(self):
        processor_wavaveca = WavaVecaProcessorWithLM.from_pretrained("hf-internal-testing/processor_with_lm")
        processor_auto = AutoProcessor.from_pretrained("hf-internal-testing/processor_with_lm")
        raw_speech = floats_list((3, 1000))
        input_wavaveca = processor_wavaveca(raw_speech, return_tensors="np")
        input_auto = processor_auto(raw_speech, return_tensors="np")
        for key in input_wavaveca.keys():
            self.assertAlmostEqual(input_wavaveca[key].sum(), input_auto[key].sum(), delta=1e-2)
        logits = self._get_dummy_logits()
        decoded_wavaveca = processor_wavaveca.batch_decode(logits)
        decoded_auto = processor_auto.batch_decode(logits)
        self.assertListEqual(decoded_wavaveca.text, decoded_auto.text)
    def test_model_input_names(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        decoder = self.get_decoder()
        processor = WavaVecaProcessorWithLM(tokenizer=tokenizer, feature_extractor=feature_extractor, decoder=decoder)
        self.assertListEqual(
            processor.model_input_names,
            feature_extractor.model_input_names,
            msg="`processor` and `feature_extractor` model input names do not match",
        )
    @staticmethod
    def get_from_offsets(offsets, key):
        retrieved_list = [d[key] for d in offsets]
        return retrieved_list
    def test_offsets_integration_fast(self):
        processor = WavaVecaProcessorWithLM.from_pretrained("hf-internal-testing/processor_with_lm")
        logits = self._get_dummy_logits()[0]
        outputs = processor.decode(logits, output_word_offsets=True)
        # check Wav2Vec2CTCTokenizerOutput keys for word
        self.assertEqual(len(outputs.keys()), 4)
        self.assertTrue("text" in outputs)
        self.assertTrue("word_offsets" in outputs)
        self.assertTrue(isinstance(outputs, WavaVecaDecoderWithLMOutput))
        self.assertEqual(" ".join(self.get_from_offsets(outputs["word_offsets"], "word")), outputs.text)
        self.assertListEqual(self.get_from_offsets(outputs["word_offsets"], "word"), ["<s>", "<s>", "</s>"])
        self.assertListEqual(self.get_from_offsets(outputs["word_offsets"], "start_offset"), [0, 2, 4])
        self.assertListEqual(self.get_from_offsets(outputs["word_offsets"], "end_offset"), [1, 3, 5])
    def test_offsets_integration_fast_batch(self):
        processor = WavaVecaProcessorWithLM.from_pretrained("hf-internal-testing/processor_with_lm")
        logits = self._get_dummy_logits()
        outputs = processor.batch_decode(logits, output_word_offsets=True)
        # check Wav2Vec2CTCTokenizerOutput keys for word
        self.assertEqual(len(outputs.keys()), 4)
        self.assertTrue("text" in outputs)
        self.assertTrue("word_offsets" in outputs)
        self.assertTrue(isinstance(outputs, WavaVecaDecoderWithLMOutput))
        self.assertListEqual(
            [" ".join(self.get_from_offsets(o, "word")) for o in outputs["word_offsets"]], outputs.text
        )
        self.assertListEqual(self.get_from_offsets(outputs["word_offsets"][0], "word"), ["<s>", "<s>", "</s>"])
        self.assertListEqual(self.get_from_offsets(outputs["word_offsets"][0], "start_offset"), [0, 2, 4])
        self.assertListEqual(self.get_from_offsets(outputs["word_offsets"][0], "end_offset"), [1, 3, 5])
    @slow
    @require_torch
    @require_torchaudio
    def test_word_time_stamp_integration(self):
        import torch

        ds = load_dataset("common_voice", "en", split="train", streaming=True)
        ds = ds.cast_column("audio", datasets.Audio(sampling_rate=16_000))
        ds_iter = iter(ds)
        sample = next(ds_iter)
        processor = AutoProcessor.from_pretrained("patrickvonplaten/wav2vec2-base-100h-with-lm")
        model = WavaVecaForCTC.from_pretrained("patrickvonplaten/wav2vec2-base-100h-with-lm")
        # compare to filename `common_voice_en_100038.mp3` of dataset viewer on https://huggingface.co/datasets/common_voice/viewer/en/train
        input_values = processor(sample["audio"]["array"], return_tensors="pt").input_values
        with torch.no_grad():
            logits = model(input_values).logits.cpu().numpy()
        output = processor.decode(logits[0], output_word_offsets=True)
        # one logit frame covers `inputs_to_logits_ratio` waveform samples, so
        # this factor converts frame offsets into seconds
        time_offset = model.config.inputs_to_logits_ratio / processor.feature_extractor.sampling_rate
        word_time_stamps = [
            {
                "start_time": d["start_offset"] * time_offset,
                "end_time": d["end_offset"] * time_offset,
                "word": d["word"],
            }
            for d in output["word_offsets"]
        ]
        expected_text = "WHY DOES MILISANDRA LOOK LIKE SHE WANTS TO CONSUME JOHN SNOW ON THE RIVER AT THE WALL"
        # output words
        self.assertEqual(" ".join(self.get_from_offsets(word_time_stamps, "word")), expected_text)
        self.assertEqual(" ".join(self.get_from_offsets(word_time_stamps, "word")), output.text)
        # output times
        start_times = torch.tensor(self.get_from_offsets(word_time_stamps, "start_time"))
        end_times = torch.tensor(self.get_from_offsets(word_time_stamps, "end_time"))
        # fmt: off
        expected_start_times = torch.tensor([1.4199, 1.6599, 2.2599, 3.0, 3.24, 3.5999, 3.7999, 4.0999, 4.26, 4.94, 5.28, 5.6599, 5.78, 5.94, 6.32, 6.5399, 6.6599])
        expected_end_times = torch.tensor([1.5399, 1.8999, 2.9, 3.16, 3.5399, 3.72, 4.0199, 4.1799, 4.76, 5.1599, 5.5599, 5.6999, 5.86, 6.1999, 6.38, 6.6199, 6.94])
        # fmt: on
        self.assertTrue(torch.allclose(start_times, expected_start_times, atol=0.01))
        self.assertTrue(torch.allclose(end_times, expected_end_times, atol=0.01))
|
def average_absolute_deviation(nums: list) -> float:
    """
    Return the average absolute deviation of a list of numbers.
    >>> average_absolute_deviation([0])
    0.0
    >>> average_absolute_deviation([4, 1, 3, 2])
    1.0
    >>> average_absolute_deviation([])
    Traceback (most recent call last):
        ...
    ValueError: List is empty
    """
    if not nums:  # makes sure that the list is not empty
        raise ValueError("List is empty")
    average = sum(nums) / len(nums)  # calculate the average
    return sum(abs(x - average) for x in nums) / len(nums)
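# Worked example: for [4, 1, 3, 2] the mean is 2.5 and the absolute deviations
# are 1.5, 1.5, 0.5 and 0.5, so the function returns their average, 1.0.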
if __name__ == "__main__":
import doctest
doctest.testmod()
|
import json
from typing import List, Optional, Tuple

from tokenizers import normalizers

from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_convbert import ConvBertTokenizer

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "YituTech/conv-bert-base": "https://huggingface.co/YituTech/conv-bert-base/resolve/main/vocab.txt",
        "YituTech/conv-bert-medium-small": (
            "https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/vocab.txt"
        ),
        "YituTech/conv-bert-small": "https://huggingface.co/YituTech/conv-bert-small/resolve/main/vocab.txt",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "YituTech/conv-bert-base": 512,
    "YituTech/conv-bert-medium-small": 512,
    "YituTech/conv-bert-small": 512,
}

PRETRAINED_INIT_CONFIGURATION = {
    "YituTech/conv-bert-base": {"do_lower_case": True},
    "YituTech/conv-bert-medium-small": {"do_lower_case": True},
    "YituTech/conv-bert-small": {"do_lower_case": True},
}


class ConvBertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = ConvBertTokenizer

    def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=True, unk_token="[UNK]", sep_token="[SEP]", pad_token="[PAD]", cls_token="[CLS]", mask_token="[MASK]", tokenize_chinese_chars=True, strip_accents=None, **kwargs):
        super().__init__(
            vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents, **kwargs,
        )
        # keep the backend normalizer in sync with the requested options
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)
        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
|
from __future__ import annotations

from collections.abc import Generator


def sieve() -> Generator[int, None, None]:
    """Incremental sieve of Eratosthenes: lazily yield the primes 2, 3, 5, ..."""
    factor_map: dict[int, int] = {}
    prime = 2
    while True:
        factor = factor_map.pop(prime, None)
        if factor:
            # `prime` is composite: move its recorded prime factor forward to
            # the next multiple that is not yet present in the map.
            x = factor + prime
            while x in factor_map:
                x += factor
            factor_map[x] = factor
        else:
            # `prime` really is prime: schedule its square and yield it.
            factor_map[prime * prime] = prime
            yield prime
        prime += 1
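# Illustration: after yielding 2 and 3 the map is {4: 2, 9: 3}; reaching 4
# pops factor 2 and re-files it at 6, so each composite is crossed off exactly
# once while the map holds only one entry per prime seen so far.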
def solution(limit: float = 1e10) -> int:
    """
    Find the least odd n for which the remainder 2 * p_n * n of
    ((p_n - 1)^n + (p_n + 1)^n) mod p_n^2 first exceeds `limit`.
    """
    primes = sieve()
    n = 1
    while True:
        prime = next(primes)
        if (2 * prime * n) > limit:
            return n
        # Ignore the next prime as the remainder will be 2.
        next(primes)
        n += 2


if __name__ == "__main__":
    print(solution())
|
import tempfile

import torch

from diffusers import IPNDMScheduler

from .test_schedulers import SchedulerCommonTest


class IPNDMSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (IPNDMScheduler,)
    forward_default_kwargs = (("num_inference_steps", 50),)

    def get_scheduler_config(self, **kwargs):
        config = {"num_train_timesteps": 1000}
        config.update(**kwargs)
        return config
    def check_over_configs(self, time_step=0, **config):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals
            scheduler.ets = dummy_past_residuals[:]
            if time_step is None:
                time_step = scheduler.timesteps[len(scheduler.timesteps) // 2]
            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residuals
                new_scheduler.ets = dummy_past_residuals[:]
            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
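    # Note: `ets` seeded above is the scheduler's internal history of past
    # model outputs; the fourth-order IPNDM update needs this history, so the
    # test fills it with dummy residuals instead of running warm-up steps
    # (attribute name per the diffusers IPNDMScheduler implementation).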
    def test_from_save_pretrained(self):
        pass
    def check_over_forward(self, time_step=0, **forward_kwargs):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals (must be after setting timesteps)
            scheduler.ets = dummy_past_residuals[:]
            if time_step is None:
                time_step = scheduler.timesteps[len(scheduler.timesteps) // 2]
            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                # copy over dummy past residuals
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residual (must be after setting timesteps)
                new_scheduler.ets = dummy_past_residuals[:]
            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
    def full_loop(self, **config):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**config)
        scheduler = scheduler_class(**scheduler_config)
        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)
        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample
        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample
        return sample
    def test_step_shape(self):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            sample = self.dummy_sample
            residual = 0.1 * sample
            if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"):
                scheduler.set_timesteps(num_inference_steps)
            elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"):
                scheduler.num_inference_steps = num_inference_steps
            # copy over dummy past residuals (must be done after set_timesteps)
            dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
            scheduler.ets = dummy_past_residuals[:]
            time_step_0 = scheduler.timesteps[5]
            time_step_1 = scheduler.timesteps[6]
            output_0 = scheduler.step(residual, time_step_0, sample, **kwargs).prev_sample
            output_1 = scheduler.step(residual, time_step_1, sample, **kwargs).prev_sample
            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)
            output_0 = scheduler.step(residual, time_step_0, sample, **kwargs).prev_sample
            output_1 = scheduler.step(residual, time_step_1, sample, **kwargs).prev_sample
            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)
    def test_timesteps(self):
        for timesteps in [100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps, time_step=None)

    def test_inference_steps(self):
        for t, num_inference_steps in zip([1, 5, 10], [10, 50, 100]):
            self.check_over_forward(num_inference_steps=num_inference_steps, time_step=None)

    def test_full_loop_no_noise(self):
        sample = self.full_loop()
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_mean.item() - 2540529) < 10
|
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available

_import_structure = {
    "configuration_timesformer": ["TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "TimesformerConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_timesformer"] = [
        "TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TimesformerModel",
        "TimesformerForVideoClassification",
        "TimesformerPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_timesformer import TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, TimesformerConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_timesformer import (
            TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            TimesformerForVideoClassification,
            TimesformerModel,
            TimesformerPreTrainedModel,
        )
else:
    import sys

    # register a lazy module so submodules are only imported on first attribute access
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowerCAmelCase : Optional[int] = logging.get_logger(__name__)
_lowerCAmelCase : Tuple = {
"""uw-madison/mra-base-512-4""": """https://huggingface.co/uw-madison/mra-base-512-4/resolve/main/config.json""",
}
class lowerCAmelCase__ ( __magic_name__ ):
SCREAMING_SNAKE_CASE_ ='''mra'''
def __init__( self : Any , snake_case__ : List[str]=5_0_2_6_5 , snake_case__ : Any=7_6_8 , snake_case__ : Union[str, Any]=1_2 , snake_case__ : Optional[Any]=1_2 , snake_case__ : Tuple=3_0_7_2 , snake_case__ : str="gelu" , snake_case__ : Any=0.1 , snake_case__ : Tuple=0.1 , snake_case__ : Tuple=5_1_2 , snake_case__ : Union[str, Any]=1 , snake_case__ : List[Any]=0.02 , snake_case__ : str=1e-5 , snake_case__ : List[Any]="absolute" , snake_case__ : str=4 , snake_case__ : List[str]="full" , snake_case__ : Tuple=0 , snake_case__ : Any=0 , snake_case__ : Union[str, Any]=1 , snake_case__ : int=0 , snake_case__ : int=2 , **snake_case__ : List[Any] , ):
'''simple docstring'''
super().__init__(pad_token_id=snake_case__ , bos_token_id=snake_case__ , eos_token_id=snake_case__ , **snake_case__ )
UpperCAmelCase__ : List[Any] = vocab_size
UpperCAmelCase__ : str = max_position_embeddings
UpperCAmelCase__ : Any = hidden_size
UpperCAmelCase__ : Union[str, Any] = num_hidden_layers
UpperCAmelCase__ : str = num_attention_heads
UpperCAmelCase__ : int = intermediate_size
UpperCAmelCase__ : int = hidden_act
UpperCAmelCase__ : List[str] = hidden_dropout_prob
UpperCAmelCase__ : List[str] = attention_probs_dropout_prob
UpperCAmelCase__ : Any = initializer_range
UpperCAmelCase__ : Any = type_vocab_size
UpperCAmelCase__ : Dict = layer_norm_eps
UpperCAmelCase__ : Tuple = position_embedding_type
UpperCAmelCase__ : List[str] = block_per_row
UpperCAmelCase__ : Optional[Any] = approx_mode
UpperCAmelCase__ : Any = initial_prior_first_n_blocks
UpperCAmelCase__ : List[Any] = initial_prior_diagonal_n_blocks
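# Illustrative usage (not part of the original module); assumes `MraModel` is
# importable from transformers alongside this config:
#     config = MraConfig(hidden_size=256, num_hidden_layers=4, num_attention_heads=4)
#     model = MraModel(config)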
|
"""simple docstring"""
import logging
import os
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from tempfile import TemporaryDirectory
from typing import List, Optional
import faiss
import torch
from datasets import Features, Sequence, Value, load_dataset
from transformers import DPRContextEncoder, DPRContextEncoderTokenizerFast, HfArgumentParser
lowerCAmelCase_ = logging.getLogger(__name__)
torch.set_grad_enabled(False)
lowerCAmelCase_ = '''cuda''' if torch.cuda.is_available() else '''cpu'''
def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=100 , __SCREAMING_SNAKE_CASE=" " )-> List[str]:
_SCREAMING_SNAKE_CASE : Optional[int] = text.split(__SCREAMING_SNAKE_CASE )
return [character.join(text[i : i + n] ).strip() for i in range(0 , len(__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE )]
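# e.g. split_text("a b c d", n=2) returns ["a b", "c d"]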
def split_documents(documents: dict) -> dict:
    """Split documents into passages"""
    titles, texts = [], []
    for title, text in zip(documents["title"], documents["text"]):
        if text is not None:
            for passage in split_text(text):
                titles.append(title if title is not None else "")
                texts.append(passage)
    return {"title": titles, "text": texts}
def embed(documents: dict, ctx_encoder: DPRContextEncoder, ctx_tokenizer: DPRContextEncoderTokenizerFast) -> dict:
    """Compute the DPR embeddings of document passages"""
    input_ids = ctx_tokenizer(
        documents["title"], documents["text"], truncation=True, padding="longest", return_tensors="pt"
    )["input_ids"]
    embeddings = ctx_encoder(input_ids.to(device=device), return_dict=True).pooler_output
    return {"embeddings": embeddings.detach().cpu().numpy()}
def main(rag_example_args: "RagExampleArguments", processing_args: "ProcessingArguments", index_hnsw_args: "IndexHnswArguments"):
    ######################################
    logger.info("Step 1 - Create the dataset")
    ######################################
    # The dataset needed for RAG must have three columns:
    # - title (string): title of the document
    # - text (string): text of a passage of the document
    # - embeddings (array of dimension d): DPR representation of the passage
    # Let's say you have documents in tab-separated csv files with columns "title" and "text"
    assert os.path.isfile(rag_example_args.csv_path), "Please provide a valid path to a csv file"
    # You can load a Dataset object this way
    dataset = load_dataset(
        "csv", data_files=[rag_example_args.csv_path], split="train", delimiter="\t", column_names=["title", "text"]
    )
    # More info about loading csv files in the documentation: https://huggingface.co/docs/datasets/loading_datasets.html?highlight=csv#csv-files
    # Then split the documents into passages of 100 words
    dataset = dataset.map(split_documents, batched=True, num_proc=processing_args.num_proc)
    # And compute the embeddings
    ctx_encoder = DPRContextEncoder.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name).to(device=device)
    ctx_tokenizer = DPRContextEncoderTokenizerFast.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name)
    new_features = Features(
        {"text": Value("string"), "title": Value("string"), "embeddings": Sequence(Value("float32"))}
    )  # optional, save as float32 instead of float64 to save space
    dataset = dataset.map(
        partial(embed, ctx_encoder=ctx_encoder, ctx_tokenizer=ctx_tokenizer),
        batched=True,
        batch_size=processing_args.batch_size,
        features=new_features,
    )
    # And finally save your dataset
    passages_path = os.path.join(rag_example_args.output_dir, "my_knowledge_dataset")
    dataset.save_to_disk(passages_path)
    # from datasets import load_from_disk
    # dataset = load_from_disk(passages_path)  # to reload the dataset
    ######################################
    logger.info("Step 2 - Index the dataset")
    ######################################
    # Let's use the Faiss implementation of HNSW for fast approximate nearest neighbor search
    index = faiss.IndexHNSWFlat(index_hnsw_args.d, index_hnsw_args.m, faiss.METRIC_INNER_PRODUCT)
    dataset.add_faiss_index("embeddings", custom_index=index)
    # And save the index
    index_path = os.path.join(rag_example_args.output_dir, "my_knowledge_dataset_hnsw_index.faiss")
    dataset.get_index("embeddings").save(index_path)
    # dataset.load_faiss_index("embeddings", index_path)  # to reload the index
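# HNSW quick reference: `d` must match the embedding dimension produced by
# `embed`, `m` bounds the number of bi-directional links per graph node, and
# METRIC_INNER_PRODUCT mirrors DPR's dot-product training objective.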
@dataclass
class RagExampleArguments:
    csv_path: str = field(
        default=str(Path(__file__).parent / "test_run" / "dummy-kb" / "my_knowledge_dataset.csv"),
        metadata={"help": "Path to a tab-separated csv file with columns 'title' and 'text'"},
    )
    question: Optional[str] = field(
        default=None,
        metadata={"help": "Question that is passed as input to RAG. Default is 'What does Moses' rod turn into ?'."},
    )
    rag_model_name: str = field(
        default="facebook/rag-sequence-nq",
        metadata={"help": "The RAG model to use. Either 'facebook/rag-sequence-nq' or 'facebook/rag-token-nq'"},
    )
    dpr_ctx_encoder_model_name: str = field(
        default="facebook/dpr-ctx_encoder-multiset-base",
        metadata={
            "help": (
                "The DPR context encoder model to use. Either 'facebook/dpr-ctx_encoder-single-nq-base' or"
                " 'facebook/dpr-ctx_encoder-multiset-base'"
            )
        },
    )
    output_dir: Optional[str] = field(
        default=str(Path(__file__).parent / "test_run" / "dummy-kb"),
        metadata={"help": "Path to a directory where the dataset passages and the index will be saved"},
    )


@dataclass
class ProcessingArguments:
    num_proc: Optional[int] = field(
        default=None,
        metadata={
            "help": "The number of processes to use to split the documents into passages. Default is single process."
        },
    )
    batch_size: int = field(
        default=16,
        metadata={
            "help": "The batch size to use when computing the passages embeddings using the DPR context encoder."
        },
    )


@dataclass
class IndexHnswArguments:
    d: int = field(
        default=768,
        metadata={"help": "The dimension of the embeddings to pass to the HNSW Faiss index."},
    )
    m: int = field(
        default=128,
        metadata={
            "help": (
                "The number of bi-directional links created for every new element during the HNSW index construction."
            )
        },
    )


if __name__ == "__main__":
    logging.basicConfig(level=logging.WARNING)
    logger.setLevel(logging.INFO)

    parser = HfArgumentParser((RagExampleArguments, ProcessingArguments, IndexHnswArguments))
    rag_example_args, processing_args, index_hnsw_args = parser.parse_args_into_dataclasses()
    with TemporaryDirectory() as tmp_dir:
        rag_example_args.output_dir = rag_example_args.output_dir or tmp_dir
        main(rag_example_args, processing_args, index_hnsw_args)
|
"""simple docstring"""
from decimal import Decimal, getcontext
from math import ceil, factorial
def lowerCamelCase_(__SCREAMING_SNAKE_CASE )-> str:
if not isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
raise TypeError("""Undefined for non-integers""" )
elif precision < 1:
raise ValueError("""Undefined for non-natural numbers""" )
_SCREAMING_SNAKE_CASE : int = precision
_SCREAMING_SNAKE_CASE : Dict = ceil(precision / 14 )
_SCREAMING_SNAKE_CASE : int = 426_880 * Decimal(10_005 ).sqrt()
_SCREAMING_SNAKE_CASE : Union[str, Any] = 1
_SCREAMING_SNAKE_CASE : str = 13_591_409
_SCREAMING_SNAKE_CASE : Tuple = Decimal(__SCREAMING_SNAKE_CASE )
for k in range(1 , __SCREAMING_SNAKE_CASE ):
_SCREAMING_SNAKE_CASE : Tuple = factorial(6 * k ) // (factorial(3 * k ) * factorial(__SCREAMING_SNAKE_CASE ) ** 3)
linear_term += 545_140_134
exponential_term *= -262_537_412_640_768_000
partial_sum += Decimal(multinomial_term * linear_term ) / exponential_term
return str(constant_term / partial_sum )[:-1]
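# Each Chudnovsky term adds roughly 14.18 correct digits, which is why
# ceil(precision / 14) iterations above suffice for `precision` digits.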
if __name__ == "__main__":
lowerCAmelCase_ = 50
print(F"The first {n} digits of pi is: {pi(n)}")
|
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import MaskaFormerConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaskaFormerForUniversalSegmentation, MaskaFormerModel
if is_vision_available():
from transformers import MaskaFormerImageProcessor
if is_vision_available():
from PIL import Image
class MaskaFormerModelTester:
    def __init__(self, parent, batch_size=2, is_training=True, use_auxiliary_loss=False, num_queries=10, num_channels=3, min_size=32 * 8, max_size=32 * 8, num_labels=4, hidden_dim=64):
        self.parent = parent
        self.batch_size = batch_size
        self.is_training = is_training
        self.use_auxiliary_loss = use_auxiliary_loss
        self.num_queries = num_queries
        self.num_channels = num_channels
        self.min_size = min_size
        self.max_size = max_size
        self.num_labels = num_labels
        self.hidden_dim = hidden_dim
        self.mask_feature_size = hidden_dim
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size]).to(
            torch_device
        )
        pixel_mask = torch.ones([self.batch_size, self.min_size, self.max_size], device=torch_device)
        mask_labels = (
            torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size], device=torch_device) > 0.5
        ).float()
        class_labels = (torch.rand((self.batch_size, self.num_labels), device=torch_device) > 0.5).long()
        config = self.get_config()
        return config, pixel_values, pixel_mask, mask_labels, class_labels
    def get_config(self):
        config = MaskaFormerConfig(
            hidden_size=self.hidden_dim,
        )
        config.num_queries = self.num_queries
        config.num_labels = self.num_labels
        config.backbone_config.depths = [1, 1, 1, 1]
        config.backbone_config.num_channels = self.num_channels
        config.encoder_feedforward_dim = 64
        config.dim_feedforward = 128
        config.hidden_dim = self.hidden_dim
        config.mask_feature_size = self.hidden_dim
        config.feature_size = self.hidden_dim
        return config
    def prepare_config_and_inputs_for_common(self):
        config, pixel_values, pixel_mask, mask_labels, class_labels = self.prepare_config_and_inputs()
        inputs_dict = {"pixel_values": pixel_values, "pixel_mask": pixel_mask}
        return config, inputs_dict
    def check_output_hidden_state(self, output, config):
        encoder_hidden_states = output.encoder_hidden_states
        pixel_decoder_hidden_states = output.pixel_decoder_hidden_states
        transformer_decoder_hidden_states = output.transformer_decoder_hidden_states
        self.parent.assertTrue(len(encoder_hidden_states), len(config.backbone_config.depths))
        self.parent.assertTrue(len(pixel_decoder_hidden_states), len(config.backbone_config.depths))
        self.parent.assertTrue(len(transformer_decoder_hidden_states), config.decoder_layers)
    def create_and_check_maskaformer_model(self, config, pixel_values, pixel_mask, output_hidden_states=False):
        with torch.no_grad():
            model = MaskaFormerModel(config=config)
            model.to(torch_device)
            model.eval()
            output = model(pixel_values=pixel_values, pixel_mask=pixel_mask)
            output = model(pixel_values, output_hidden_states=True)
        self.parent.assertEqual(
            output.transformer_decoder_last_hidden_state.shape,
            (self.batch_size, self.num_queries, self.hidden_dim),
        )
        # let's ensure the other two hidden state exists
        self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None)
        self.parent.assertTrue(output.encoder_last_hidden_state is not None)
        if output_hidden_states:
            self.check_output_hidden_state(output, config)
    def create_and_check_maskaformer_instance_segmentation_head_model(
        self, config, pixel_values, pixel_mask, mask_labels, class_labels
    ):
        model = MaskaFormerForUniversalSegmentation(config=config)
        model.to(torch_device)
        model.eval()

        def comm_check_on_output(result):
            # let's still check that all the required stuff is there
            self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None)
            self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None)
            self.parent.assertTrue(result.encoder_last_hidden_state is not None)
            # okay, now we need to check the logits shape
            # due to the encoder compression, masks have a //4 spatial size
            self.parent.assertEqual(
                result.masks_queries_logits.shape,
                (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4),
            )
            # + 1 for null class
            self.parent.assertEqual(
                result.class_queries_logits.shape, (self.batch_size, self.num_queries, self.num_labels + 1)
            )

        with torch.no_grad():
            result = model(pixel_values=pixel_values, pixel_mask=pixel_mask)
            result = model(pixel_values)
            comm_check_on_output(result)
            result = model(
                pixel_values=pixel_values, pixel_mask=pixel_mask, mask_labels=mask_labels, class_labels=class_labels
            )
            comm_check_on_output(result)
        self.parent.assertTrue(result.loss is not None)
        self.parent.assertEqual(result.loss.shape, torch.Size([1]))
@require_torch
class MaskaFormerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (MaskaFormerModel, MaskaFormerForUniversalSegmentation) if is_torch_available() else ()
    pipeline_model_mapping = {"feature-extraction": MaskaFormerModel} if is_torch_available() else {}
    is_encoder_decoder = False
    test_pruning = False
    test_head_masking = False
    test_attention_outputs = False

    def setUp(self):
        self.model_tester = MaskaFormerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MaskaFormerConfig, has_text_modality=False)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_maskaformer_model(self):
        config, inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.create_and_check_maskaformer_model(config, **inputs, output_hidden_states=False)

    def test_maskaformer_instance_segmentation_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_maskaformer_instance_segmentation_head_model(*config_and_inputs)
@unittest.skip(reason="""Mask2Former does not use inputs_embeds""" )
def _a ( self : Union[str, Any] ) -> List[str]:
pass
@unittest.skip(reason="""Mask2Former does not have a get_input_embeddings method""" )
def _a ( self : str ) -> List[str]:
pass
@unittest.skip(reason="""Mask2Former is not a generative model""" )
def _a ( self : Any ) -> Any:
pass
@unittest.skip(reason="""Mask2Former does not use token embeddings""" )
def _a ( self : Union[str, Any] ) -> int:
pass
@require_torch_multi_gpu
@unittest.skip(
reason="""Mask2Former has some layers using `add_module` which doesn't work well with `nn.DataParallel`""" )
def _a ( self : List[Any] ) -> int:
pass
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def _a ( self : Union[str, Any] ) -> Tuple:
pass
def _a ( self : str ) -> Any:
__UpperCAmelCase , __UpperCAmelCase =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__UpperCAmelCase =model_class(__SCREAMING_SNAKE_CASE )
__UpperCAmelCase =inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__UpperCAmelCase =[*signature.parameters.keys()]
__UpperCAmelCase =["""pixel_values"""]
self.assertListEqual(arg_names[:1] , __SCREAMING_SNAKE_CASE )
@slow
def _a ( self : Dict ) -> str:
for model_name in ["facebook/mask2former-swin-small-coco-instance"]:
__UpperCAmelCase =MaskaFormerModel.from_pretrained(__SCREAMING_SNAKE_CASE )
self.assertIsNotNone(__SCREAMING_SNAKE_CASE )
    def test_model_with_labels(self):
        size = (self.model_tester.min_size,) * 2
        inputs = {
            "pixel_values": torch.randn((2, 3, *size), device=torch_device),
            "mask_labels": torch.randn((2, 10, *size), device=torch_device),
            "class_labels": torch.zeros(2, 10, device=torch_device).long(),
        }
        config = self.model_tester.get_config()
        model = MaskaFormerForUniversalSegmentation(config).to(torch_device)
        outputs = model(**inputs)
        self.assertTrue(outputs.loss is not None)

    def test_hidden_states_output(self):
        config, inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.create_and_check_maskaformer_model(config, **inputs, output_hidden_states=True)

    def test_attention_outputs(self):
        config, inputs = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config).to(torch_device)
            outputs = model(**inputs, output_attentions=True)
            self.assertTrue(outputs.attentions is not None)
    def test_training(self):
        if not self.model_tester.is_training:
            return
        # only the universal-segmentation head produces a loss to train against
        model_class = self.all_model_classes[1]
        config, pixel_values, pixel_mask, mask_labels, class_labels = self.model_tester.prepare_config_and_inputs()
        model = model_class(config)
        model.to(torch_device)
        model.train()
        loss = model(pixel_values, mask_labels=mask_labels, class_labels=class_labels).loss
        loss.backward()

    def test_retain_grad_hidden_states_attentions(self):
        model_class = self.all_model_classes[1]
        config, pixel_values, pixel_mask, mask_labels, class_labels = self.model_tester.prepare_config_and_inputs()
        config.output_hidden_states = True
        config.output_attentions = True
        model = model_class(config).to(torch_device)
        model.train()
        outputs = model(pixel_values, mask_labels=mask_labels, class_labels=class_labels)
        encoder_hidden_states = outputs.encoder_hidden_states[0]
        encoder_hidden_states.retain_grad()
        pixel_decoder_hidden_states = outputs.pixel_decoder_hidden_states[0]
        pixel_decoder_hidden_states.retain_grad()
        transformer_decoder_hidden_states = outputs.transformer_decoder_hidden_states[0]
        transformer_decoder_hidden_states.retain_grad()
        attentions = outputs.attentions[0]
        attentions.retain_grad()
        outputs.loss.backward(retain_graph=True)
        self.assertIsNotNone(encoder_hidden_states.grad)
        self.assertIsNotNone(pixel_decoder_hidden_states.grad)
        self.assertIsNotNone(transformer_decoder_hidden_states.grad)
        self.assertIsNotNone(attentions.grad)
TOLERANCE = 1e-4


def prepare_img():
    # the COCO two-cats fixture used across the integration tests
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_vision
@slow
class MaskaFormerModelIntegrationTest(unittest.TestCase):
    @cached_property
    def model_checkpoints(self):
        return "facebook/mask2former-swin-small-coco-instance"

    @cached_property
    def default_image_processor(self):
        return MaskaFormerImageProcessor.from_pretrained(self.model_checkpoints) if is_vision_available() else None

    def test_inference_no_head(self):
        model = MaskaFormerModel.from_pretrained(self.model_checkpoints).to(torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(image, return_tensors="pt").to(torch_device)
        inputs_shape = inputs["pixel_values"].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0)
        # check size
        self.assertEqual(inputs_shape, (1, 3, 384, 384))
        with torch.no_grad():
            outputs = model(**inputs)
        expected_slice_hidden_state = torch.tensor(
            [[-0.2790, -1.0717, -1.1668], [-0.5128, -0.3128, -0.4987], [-0.5832, 0.1971, -0.0197]]
        ).to(torch_device)
        self.assertTrue(
            torch.allclose(
                outputs.encoder_last_hidden_state[0, 0, :3, :3], expected_slice_hidden_state, atol=TOLERANCE
            )
        )
        expected_slice_hidden_state = torch.tensor(
            [[0.8973, 1.1847, 1.1776], [1.1934, 1.5040, 1.5128], [1.1153, 1.4486, 1.4951]]
        ).to(torch_device)
        self.assertTrue(
            torch.allclose(
                outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3], expected_slice_hidden_state, atol=TOLERANCE
            )
        )
        expected_slice_hidden_state = torch.tensor(
            [[2.1152, 1.7000, -0.8603], [1.5808, 1.8004, -0.9353], [1.6043, 1.7495, -0.5999]]
        ).to(torch_device)
        self.assertTrue(
            torch.allclose(
                outputs.transformer_decoder_last_hidden_state[0, :3, :3], expected_slice_hidden_state, atol=TOLERANCE
            )
        )
    def test_inference_universal_segmentation_head(self):
        model = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints).to(torch_device).eval()
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(image, return_tensors="pt").to(torch_device)
        inputs_shape = inputs["pixel_values"].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0)
        # check size
        self.assertEqual(inputs_shape, (1, 3, 384, 384))
        with torch.no_grad():
            outputs = model(**inputs)
        # masks_queries_logits
        masks_queries_logits = outputs.masks_queries_logits
        self.assertEqual(
            masks_queries_logits.shape, (1, model.config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4)
        )
        expected_slice = [
            [-8.7839, -9.0056, -8.8121],
            [-7.4104, -7.0313, -6.5401],
            [-6.6105, -6.3427, -6.4675],
        ]
        expected_slice = torch.tensor(expected_slice).to(torch_device)
        self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3], expected_slice, atol=TOLERANCE))
        # class_queries_logits
        class_queries_logits = outputs.class_queries_logits
        self.assertEqual(class_queries_logits.shape, (1, model.config.num_queries, model.config.num_labels + 1))
        expected_slice = torch.tensor(
            [
                [1.8324, -8.0835, -4.1922],
                [0.8450, -9.0050, -3.6053],
                [0.3045, -7.7293, -3.0275],
            ]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3], expected_slice, atol=TOLERANCE))
    def test_with_segmentation_maps_and_loss(self):
        model = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints).to(torch_device).eval()
        image_processor = self.default_image_processor
        inputs = image_processor(
            [np.zeros((3, 800, 1333)), np.zeros((3, 800, 1333))],
            segmentation_maps=[np.zeros((384, 384)).astype(np.float32), np.zeros((384, 384)).astype(np.float32)],
            return_tensors="pt",
        )
        inputs["pixel_values"] = inputs["pixel_values"].to(torch_device)
        inputs["mask_labels"] = [el.to(torch_device) for el in inputs["mask_labels"]]
        inputs["class_labels"] = [el.to(torch_device) for el in inputs["class_labels"]]
        with torch.no_grad():
            outputs = model(**inputs)
        self.assertTrue(outputs.loss is not None)
|
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_camembert import CamembertTokenizer
else:
_UpperCamelCase = None
_UpperCamelCase = logging.get_logger(__name__)
_UpperCamelCase = {'vocab_file': 'sentencepiece.bpe.model', 'tokenizer_file': 'tokenizer.json'}
_UpperCamelCase = {
'vocab_file': {
'camembert-base': 'https://huggingface.co/camembert-base/resolve/main/sentencepiece.bpe.model',
},
'tokenizer_file': {
'camembert-base': 'https://huggingface.co/camembert-base/resolve/main/tokenizer.json',
},
}
_UpperCamelCase = {
'camembert-base': 512,
}
_UpperCamelCase = '▁'
class SCREAMING_SNAKE_CASE_ ( snake_case__ ):
"""simple docstring"""
__snake_case : List[str] = VOCAB_FILES_NAMES
__snake_case : Optional[int] = PRETRAINED_VOCAB_FILES_MAP
__snake_case : Dict = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__snake_case : List[str] = ["""input_ids""", """attention_mask"""]
__snake_case : List[Any] = CamembertTokenizer
    def __init__(self, vocab_file=None, tokenizer_file=None, bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", additional_special_tokens=["<s>NOTUSED", "</s>NOTUSED"], **kwargs):
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        super().__init__(
            vocab_file, tokenizer_file=tokenizer_file, bos_token=bos_token, eos_token=eos_token, sep_token=sep_token, cls_token=cls_token, unk_token=unk_token, pad_token=pad_token, mask_token=mask_token, additional_special_tokens=additional_special_tokens, **kwargs,
        )
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep
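    # For reference, the pair format produced above is CamemBERT's scheme
    #     <s> A </s></s> B </s>
    # i.e. two </s> separators between the segments, matching the slow tokenizer.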
    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
lowercase = logging.get_logger(__name__)
lowercase = {
'''shi-labs/dinat-mini-in1k-224''': '''https://huggingface.co/shi-labs/dinat-mini-in1k-224/resolve/main/config.json''',
# See all Dinat models at https://huggingface.co/models?filter=dinat
}
class A_ ( snake_case__ , snake_case__ ):
UpperCAmelCase__ = '''dinat'''
UpperCAmelCase__ = {
'''num_attention_heads''': '''num_heads''',
'''num_hidden_layers''': '''num_layers''',
}
def __init__( self : List[Any] , __lowerCamelCase : str=4 , __lowerCamelCase : Dict=3 , __lowerCamelCase : Any=6_4 , __lowerCamelCase : Optional[int]=[3, 4, 6, 5] , __lowerCamelCase : int=[2, 4, 8, 1_6] , __lowerCamelCase : List[Any]=7 , __lowerCamelCase : str=[[1, 8, 1], [1, 4, 1, 4], [1, 2, 1, 2, 1, 2], [1, 1, 1, 1, 1]] , __lowerCamelCase : Dict=3.0 , __lowerCamelCase : Tuple=True , __lowerCamelCase : Tuple=0.0 , __lowerCamelCase : int=0.0 , __lowerCamelCase : Optional[Any]=0.1 , __lowerCamelCase : Tuple="gelu" , __lowerCamelCase : Any=0.02 , __lowerCamelCase : str=1e-5 , __lowerCamelCase : Tuple=0.0 , __lowerCamelCase : Optional[int]=None , __lowerCamelCase : List[str]=None , **__lowerCamelCase : List[str] , ) -> List[str]:
super().__init__(**_A )
__magic_name__ = patch_size
__magic_name__ = num_channels
__magic_name__ = embed_dim
__magic_name__ = depths
__magic_name__ = len(_A )
__magic_name__ = num_heads
__magic_name__ = kernel_size
__magic_name__ = dilations
__magic_name__ = mlp_ratio
__magic_name__ = qkv_bias
__magic_name__ = hidden_dropout_prob
__magic_name__ = attention_probs_dropout_prob
__magic_name__ = drop_path_rate
__magic_name__ = hidden_act
__magic_name__ = layer_norm_eps
__magic_name__ = initializer_range
# we set the hidden_size attribute in order to make Dinat work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
__magic_name__ = int(embed_dim * 2 ** (len(_A ) - 1) )
__magic_name__ = layer_scale_init_value
__magic_name__ = ['stem'] + [f'''stage{idx}''' for idx in range(1 , len(_A ) + 1 )]
__magic_name__ = get_aligned_output_features_output_indices(
out_features=_A , out_indices=_A , stage_names=self.stage_names )
|
"""simple docstring"""
from __future__ import annotations
from random import choice
def _lowerCAmelCase ( __lowerCamelCase:Optional[int] ):
'''simple docstring'''
return choice(__lowerCamelCase )
def _lowerCAmelCase ( __lowerCamelCase:list[int] , __lowerCamelCase:int ):
'''simple docstring'''
__magic_name__ = random_pivot(__lowerCamelCase )
# partition based on pivot
# linear time
__magic_name__ = [e for e in lst if e < pivot]
__magic_name__ = [e for e in lst if e > pivot]
# if we get lucky, pivot might be the element we want.
# we can easily see this:
# small (elements smaller than k)
# + pivot (kth element)
# + big (elements larger than k)
if len(__lowerCamelCase ) == k - 1:
return pivot
# pivot is in elements bigger than k
elif len(__lowerCamelCase ) < k - 1:
return kth_number(__lowerCamelCase , k - len(__lowerCamelCase ) - 1 )
# pivot is in elements smaller than k
else:
return kth_number(__lowerCamelCase , __lowerCamelCase )
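# Quickselect analysis: each call recurses into only one side of the
# partition, so the expected running time is O(n); repeatedly unlucky pivots
# degrade it to O(n^2) in the worst case.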
if __name__ == "__main__":
import doctest
doctest.testmod()
|
from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import ResNetConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFResNetForImageClassification, TFResNetModel
from transformers.models.resnet.modeling_tf_resnet import TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class TFResNetModelTester:
    def __init__(self, parent, batch_size=3, image_size=32, num_channels=3, embeddings_size=10, hidden_sizes=[10, 20, 30, 40], depths=[1, 1, 2, 1], is_training=True, use_labels=True, hidden_act="relu", num_labels=3, scope=None):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(hidden_sizes)
def UpperCAmelCase ( self : List[str] ) -> List[Any]:
__UpperCAmelCase : Union[str, Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__UpperCAmelCase : Any = None
if self.use_labels:
__UpperCAmelCase : Dict = ids_tensor([self.batch_size] , self.num_labels )
__UpperCAmelCase : List[Any] = self.get_config()
return config, pixel_values, labels
def UpperCAmelCase ( self : str ) -> Optional[int]:
return ResNetConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , image_size=self.image_size , )
def UpperCAmelCase ( self : int , __lowercase : Dict , __lowercase : Union[str, Any] , __lowercase : Optional[Any] ) -> Optional[Any]:
__UpperCAmelCase : Optional[int] = TFResNetModel(config=__lowercase )
__UpperCAmelCase : Any = model(__lowercase )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def UpperCAmelCase ( self : Any , __lowercase : Optional[int] , __lowercase : Optional[int] , __lowercase : Any ) -> List[Any]:
__UpperCAmelCase : Optional[Any] = self.num_labels
__UpperCAmelCase : List[Any] = TFResNetForImageClassification(__lowercase )
__UpperCAmelCase : Dict = model(__lowercase , labels=__lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def UpperCAmelCase ( self : List[Any] ) -> str:
__UpperCAmelCase : Tuple = self.prepare_config_and_inputs()
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : Tuple = config_and_inputs
__UpperCAmelCase : int = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_tf
class TFResNetModelTest ( TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
"""simple docstring"""
a : Tuple = (TFResNetModel, TFResNetForImageClassification) if is_tf_available() else ()
a : Union[str, Any] = (
{'feature-extraction': TFResNetModel, 'image-classification': TFResNetForImageClassification}
if is_tf_available()
else {}
)
a : List[str] = False
a : List[Any] = False
a : Tuple = False
a : List[Any] = False
a : Union[str, Any] = False
    def setUp( self ) -> None:
        self.model_tester = TFResNetModelTester(self )
        self.config_tester = ConfigTester(self , config_class=ResNetConfig , has_text_modality=False )
def UpperCAmelCase ( self : Union[str, Any] ) -> Optional[int]:
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def UpperCAmelCase ( self : List[Any] ) -> Tuple:
return
@unittest.skip(reason="""ResNet does not use inputs_embeds""" )
def UpperCAmelCase ( self : Tuple ) -> List[str]:
pass
@unittest.skip(reason="""ResNet does not support input and output embeddings""" )
def UpperCAmelCase ( self : List[str] ) -> Optional[int]:
pass
def UpperCAmelCase ( self : Union[str, Any] ) -> List[str]:
__UpperCAmelCase , __UpperCAmelCase : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__UpperCAmelCase : str = model_class(__lowercase )
__UpperCAmelCase : Optional[Any] = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__UpperCAmelCase : Optional[Any] = [*signature.parameters.keys()]
__UpperCAmelCase : Optional[int] = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , __lowercase )
def UpperCAmelCase ( self : Tuple ) -> str:
__UpperCAmelCase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__lowercase )
    def UpperCAmelCase ( self : Any ) -> List[Any]:
        def check_hidden_states_output(inputs_dict , config , model_class ):
            model = model_class(config )
            outputs = model(**self._prepare_for_class(inputs_dict , model_class ) )
            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states ) , expected_num_stages + 1 )
            # ResNet's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        layers_type = ["""basic""", """bottleneck"""]
        for model_class in self.all_model_classes:
            for layer_type in layers_type:
                config.layer_type = layer_type
                inputs_dict["""output_hidden_states"""] = True
                check_hidden_states_output(inputs_dict , config , model_class )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict , config , model_class )
def UpperCAmelCase ( self : Optional[Any] ) -> Tuple:
__UpperCAmelCase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__lowercase )
@slow
def UpperCAmelCase ( self : List[Any] ) -> Union[str, Any]:
for model_name in TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__UpperCAmelCase : Optional[int] = TFResNetModel.from_pretrained(__lowercase )
self.assertIsNotNone(__lowercase )
def prepare_img( ):
__UpperCAmelCase : str = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_tf
@require_vision
class TFResNetModelIntegrationTest ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def UpperCAmelCase ( self : Any ) -> int:
return (
AutoImageProcessor.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
if is_vision_available()
else None
)
@slow
def UpperCAmelCase ( self : Union[str, Any] ) -> Optional[Any]:
__UpperCAmelCase : Any = TFResNetForImageClassification.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
__UpperCAmelCase : int = self.default_image_processor
__UpperCAmelCase : Optional[Any] = prepare_img()
__UpperCAmelCase : List[str] = image_processor(images=__lowercase , return_tensors="""tf""" )
# forward pass
__UpperCAmelCase : Tuple = model(**__lowercase )
# verify the logits
__UpperCAmelCase : Optional[int] = tf.TensorShape((1, 1000) )
self.assertEqual(outputs.logits.shape , __lowercase )
__UpperCAmelCase : Union[str, Any] = tf.constant([-11.1_069, -9.7_877, -8.3_777] )
self.assertTrue(np.allclose(outputs.logits[0, :3].numpy() , __lowercase , atol=1e-4 ) )
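# --- Editor's note (a hedged sketch, not part of the original test file) ---
# The integration test above mirrors ordinary TFResNet inference. A minimal
# standalone version, assuming Hub access and that the archive list starts
# with "microsoft/resnet-50", would look like:
#
#     processor = AutoImageProcessor.from_pretrained("microsoft/resnet-50")
#     model = TFResNetForImageClassification.from_pretrained("microsoft/resnet-50")
#     inputs = processor(images=prepare_img(), return_tensors="tf")
#     logits = model(**inputs).logits                    # shape (1, 1000)
#     predicted = int(tf.math.argmax(logits, axis=-1)[0])
#     print(model.config.id2label[predicted])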
| 63
|
from manim import *
class A__ ( Scene ):
    def construct( self ) -> None:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =Rectangle(height=0.5 , width=0.5 )
_SCREAMING_SNAKE_CASE =Rectangle(height=0.25 , width=0.25 )
_SCREAMING_SNAKE_CASE =Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
_SCREAMING_SNAKE_CASE =[mem.copy() for i in range(6 )]
_SCREAMING_SNAKE_CASE =[mem.copy() for i in range(6 )]
_SCREAMING_SNAKE_CASE =VGroup(*_a ).arrange(_a , buff=0 )
_SCREAMING_SNAKE_CASE =VGroup(*_a ).arrange(_a , buff=0 )
_SCREAMING_SNAKE_CASE =VGroup(_a , _a ).arrange(_a , buff=0 )
_SCREAMING_SNAKE_CASE =Text('''CPU''' , font_size=24 )
_SCREAMING_SNAKE_CASE =Group(_a , _a ).arrange(_a , buff=0.5 , aligned_edge=_a )
cpu.move_to([-2.5, -0.5, 0] )
self.add(_a )
_SCREAMING_SNAKE_CASE =[mem.copy() for i in range(4 )]
_SCREAMING_SNAKE_CASE =VGroup(*_a ).arrange(_a , buff=0 )
_SCREAMING_SNAKE_CASE =Text('''GPU''' , font_size=24 )
_SCREAMING_SNAKE_CASE =Group(_a , _a ).arrange(_a , buff=0.5 , aligned_edge=_a )
gpu.move_to([-1, -1, 0] )
self.add(_a )
_SCREAMING_SNAKE_CASE =[mem.copy() for i in range(6 )]
_SCREAMING_SNAKE_CASE =VGroup(*_a ).arrange(_a , buff=0 )
_SCREAMING_SNAKE_CASE =Text('''Model''' , font_size=24 )
_SCREAMING_SNAKE_CASE =Group(_a , _a ).arrange(_a , buff=0.5 , aligned_edge=_a )
model.move_to([3, -1.0, 0] )
self.add(_a )
_SCREAMING_SNAKE_CASE =[]
_SCREAMING_SNAKE_CASE =[]
_SCREAMING_SNAKE_CASE =[]
for i, rect in enumerate(_a ):
rect.set_stroke(_a )
_SCREAMING_SNAKE_CASE =Rectangle(height=0.46 / 4 , width=0.46 / 3 ).set_stroke(width=0.0 ).set_fill(_a , opacity=0.7 )
if i == 0:
cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.02 , direction=_a )
cpu_target.set_x(cpu_target.get_x() + 0.1 )
elif i == 3:
cpu_target.next_to(model_cpu_arr[0] , direction=_a , buff=0.0 )
else:
cpu_target.next_to(model_cpu_arr[i - 1] , direction=_a , buff=0.0 )
self.add(_a )
model_cpu_arr.append(_a )
self.add(*_a , *_a , *_a )
_SCREAMING_SNAKE_CASE =[mem.copy() for i in range(6 )]
_SCREAMING_SNAKE_CASE =VGroup(*_a ).arrange(_a , buff=0 )
_SCREAMING_SNAKE_CASE =Text('''Loaded Checkpoint''' , font_size=24 )
_SCREAMING_SNAKE_CASE =Group(_a , _a ).arrange(_a , buff=0.5 , aligned_edge=_a )
checkpoint.move_to([3, 0.5, 0] )
self.add(_a )
_SCREAMING_SNAKE_CASE =[]
_SCREAMING_SNAKE_CASE =[]
for i, rect in enumerate(_a ):
_SCREAMING_SNAKE_CASE =fill.copy().set_fill(_a , opacity=0.7 )
target.move_to(_a )
ckpt_arr.append(_a )
_SCREAMING_SNAKE_CASE =target.copy()
if i < 5:
cpu_target.move_to(cpu_left_col_base[i + 1] )
else:
cpu_target.move_to(cpu_right_col_base[i - 5] )
ckpt_cpu_arr.append(_a )
self.add(*_a , *_a )
_SCREAMING_SNAKE_CASE =Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
_SCREAMING_SNAKE_CASE =MarkupText(
f"<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model" , font_size=18 , )
key_text.move_to([-5, 2.4, 0] )
self.add(_a , _a )
_SCREAMING_SNAKE_CASE =MarkupText(
f"<span fgcolor='{BLUE}'>●</span> Checkpoint" , font_size=18 , )
blue_text.next_to(_a , DOWN * 2.4 , aligned_edge=key_text.get_left() )
self.add(_a )
_SCREAMING_SNAKE_CASE =MarkupText(
f"Based on the passed in configuration, weights are stored in\na variety of np.memmaps on disk or to a particular device." , font_size=24 , )
step_a.move_to([2, 2, 0] )
_SCREAMING_SNAKE_CASE =[meta_mem.copy() for i in range(6 )]
_SCREAMING_SNAKE_CASE =[meta_mem.copy() for i in range(6 )]
_SCREAMING_SNAKE_CASE =VGroup(*_a ).arrange(_a , buff=0 )
_SCREAMING_SNAKE_CASE =VGroup(*_a ).arrange(_a , buff=0 )
_SCREAMING_SNAKE_CASE =VGroup(_a , _a ).arrange(_a , buff=0 )
_SCREAMING_SNAKE_CASE =Text('''Disk''' , font_size=24 )
_SCREAMING_SNAKE_CASE =Group(_a , _a ).arrange(_a , buff=0.5 , aligned_edge=_a )
disk.move_to([-4.0, -1.25, 0] )
self.play(Write(_a , run_time=3 ) , Write(_a , run_time=1 ) , Create(_a , run_time=1 ) )
_SCREAMING_SNAKE_CASE =[]
for i, rect in enumerate(_a ):
_SCREAMING_SNAKE_CASE =rect.copy()
target.generate_target()
target.target.move_to(disk_left_col_base[i] ).scale(0.5 )
animations.append(MoveToTarget(_a , run_time=1.5 ) )
self.play(*_a )
self.play(FadeOut(_a ) )
_SCREAMING_SNAKE_CASE =MarkupText(f"Then, the checkpoint is removed from memory\nthrough garbage collection." , font_size=24 )
step_a.move_to([2, 2, 0] )
self.play(Write(_a , run_time=3 ) )
self.play(
FadeOut(_a , _a , *_a , *_a ) , )
self.wait()
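# --- Editor's note (hedged; not part of the original animation script) ---
# Manim renders Scene subclasses by executing their construct() method, so a
# script like the one above is typically run from the command line:
#
#     manim -pql this_script.py A__
#
# where "this_script.py" is a placeholder filename, A__ is the Scene subclass
# defined above, and -pql previews the result at low quality.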
| 691
| 0
|
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, PNDMScheduler, StableDiffusionInpaintPipeline, UNetaDConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class StableDiffusion2InpaintPipelineFastTests( PipelineLatentTesterMixin , PipelineKarrasSchedulerTesterMixin , PipelineTesterMixin , unittest.TestCase):
'''simple docstring'''
__a : Dict = StableDiffusionInpaintPipeline
__a : str = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
__a : int = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
__a : Optional[Any] = frozenset(
[]) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
__a : Tuple = frozenset([])
def A__ ( self ) ->int:
torch.manual_seed(0 )
UpperCAmelCase__ :Union[str, Any] = UNetaDConditionModel(
            block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=9 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=32 , attention_head_dim=(2, 4) , use_linear_projection=True , )
        UpperCAmelCase__ :Optional[Any] = PNDMScheduler(skip_prk_steps=True )
torch.manual_seed(0 )
UpperCAmelCase__ :Tuple = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , sample_size=1_28 , )
torch.manual_seed(0 )
UpperCAmelCase__ :Optional[int] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , hidden_act='gelu' , projection_dim=5_12 , )
UpperCAmelCase__ :Union[str, Any] = CLIPTextModel(A )
UpperCAmelCase__ :Optional[int] = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
UpperCAmelCase__ :Dict = {
'unet': unet,
'scheduler': scheduler,
'vae': vae,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'safety_checker': None,
'feature_extractor': None,
}
return components
def A__ ( self , A , A=0 ) ->List[str]:
# TODO: use tensor inputs instead of PIL, this is here just to leave the old expected_slices untouched
UpperCAmelCase__ :Dict = floats_tensor((1, 3, 32, 32) , rng=random.Random(A ) ).to(A )
UpperCAmelCase__ :Dict = image.cpu().permute(0 , 2 , 3 , 1 )[0]
        UpperCAmelCase__ :int = Image.fromarray(np.uint8(A ) ).convert('RGB' ).resize((64, 64) )
        UpperCAmelCase__ :Tuple = Image.fromarray(np.uint8(image + 4 ) ).convert('RGB' ).resize((64, 64) )
if str(A ).startswith('mps' ):
UpperCAmelCase__ :Optional[int] = torch.manual_seed(A )
else:
UpperCAmelCase__ :Optional[int] = torch.Generator(device=A ).manual_seed(A )
UpperCAmelCase__ :List[Any] = {
'prompt': 'A painting of a squirrel eating a burger',
'image': init_image,
'mask_image': mask_image,
'generator': generator,
'num_inference_steps': 2,
'guidance_scale': 6.0,
'output_type': 'numpy',
}
return inputs
def A__ ( self ) ->Optional[int]:
UpperCAmelCase__ :Dict = 'cpu' # ensure determinism for the device-dependent torch.Generator
UpperCAmelCase__ :Tuple = self.get_dummy_components()
UpperCAmelCase__ :Any = StableDiffusionInpaintPipeline(**A )
UpperCAmelCase__ :int = sd_pipe.to(A )
sd_pipe.set_progress_bar_config(disable=A )
UpperCAmelCase__ :List[str] = self.get_dummy_inputs(A )
UpperCAmelCase__ :List[str] = sd_pipe(**A ).images
UpperCAmelCase__ :Tuple = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
UpperCAmelCase__ :List[str] = np.array([0.4727, 0.5735, 0.3941, 0.5446, 0.5926, 0.4394, 0.5062, 0.4654, 0.4476] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def A__ ( self ) ->Dict:
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
@slow
@require_torch_gpu
class StableDiffusion2InpaintPipelineIntegrationTests( unittest.TestCase):
'''simple docstring'''
def A__ ( self ) ->str:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def A__ ( self ) ->Optional[Any]:
UpperCAmelCase__ :Union[str, Any] = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/sd2-inpaint/init_image.png' )
UpperCAmelCase__ :Optional[Any] = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png' )
UpperCAmelCase__ :List[Any] = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint'
'/yellow_cat_sitting_on_a_park_bench.npy' )
UpperCAmelCase__ :Tuple = 'stabilityai/stable-diffusion-2-inpainting'
UpperCAmelCase__ :Optional[int] = StableDiffusionInpaintPipeline.from_pretrained(A , safety_checker=A )
pipe.to(A )
pipe.set_progress_bar_config(disable=A )
pipe.enable_attention_slicing()
UpperCAmelCase__ :Optional[Any] = 'Face of a yellow cat, high resolution, sitting on a park bench'
UpperCAmelCase__ :Dict = torch.manual_seed(0 )
UpperCAmelCase__ :Tuple = pipe(
prompt=A , image=A , mask_image=A , generator=A , output_type='np' , )
UpperCAmelCase__ :List[str] = output.images[0]
assert image.shape == (5_12, 5_12, 3)
assert np.abs(expected_image - image ).max() < 9e-3
def A__ ( self ) ->str:
UpperCAmelCase__ :Optional[Any] = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/sd2-inpaint/init_image.png' )
UpperCAmelCase__ :int = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png' )
UpperCAmelCase__ :List[Any] = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint'
'/yellow_cat_sitting_on_a_park_bench_fp16.npy' )
UpperCAmelCase__ :Dict = 'stabilityai/stable-diffusion-2-inpainting'
UpperCAmelCase__ :Dict = StableDiffusionInpaintPipeline.from_pretrained(
            A , torch_dtype=torch.float16 , safety_checker=A , )
pipe.to(A )
pipe.set_progress_bar_config(disable=A )
pipe.enable_attention_slicing()
UpperCAmelCase__ :int = 'Face of a yellow cat, high resolution, sitting on a park bench'
UpperCAmelCase__ :int = torch.manual_seed(0 )
UpperCAmelCase__ :int = pipe(
prompt=A , image=A , mask_image=A , generator=A , output_type='np' , )
UpperCAmelCase__ :int = output.images[0]
assert image.shape == (5_12, 5_12, 3)
assert np.abs(expected_image - image ).max() < 5e-1
def A__ ( self ) ->Any:
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
UpperCAmelCase__ :Any = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/sd2-inpaint/init_image.png' )
UpperCAmelCase__ :Union[str, Any] = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png' )
UpperCAmelCase__ :List[str] = 'stabilityai/stable-diffusion-2-inpainting'
UpperCAmelCase__ :str = PNDMScheduler.from_pretrained(A , subfolder='scheduler' )
UpperCAmelCase__ :str = StableDiffusionInpaintPipeline.from_pretrained(
            A , safety_checker=A , scheduler=A , torch_dtype=torch.float16 , )
pipe.to(A )
pipe.set_progress_bar_config(disable=A )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
UpperCAmelCase__ :int = 'Face of a yellow cat, high resolution, sitting on a park bench'
UpperCAmelCase__ :List[Any] = torch.manual_seed(0 )
UpperCAmelCase__ :Tuple = pipe(
prompt=A , image=A , mask_image=A , generator=A , num_inference_steps=2 , output_type='np' , )
UpperCAmelCase__ :int = torch.cuda.max_memory_allocated()
# make sure that less than 2.65 GB is allocated
assert mem_bytes < 2.65 * 10**9
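# --- Editor's usage sketch (hedged; not part of the original test file) ---
# The slow tests above exercise the public inpainting API end to end. A
# minimal standalone call, reusing the checkpoint and the example images
# loaded in those tests, would look like:
#
#     pipe = StableDiffusionInpaintPipeline.from_pretrained(
#         "stabilityai/stable-diffusion-2-inpainting", torch_dtype=torch.float16
#     ).to("cuda")
#     result = pipe(
#         prompt="Face of a yellow cat, high resolution, sitting on a park bench",
#         image=init_image,        # PIL image to edit
#         mask_image=mask_image,   # white pixels are repainted
#         generator=torch.manual_seed(0),
#     ).images[0]
#     result.save("inpainted.png")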
| 433
|
import unittest
from transformers import TrOCRConfig
from transformers.testing_utils import is_torch_available, require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers.models.trocr.modeling_trocr import TrOCRDecoder, TrOCRForCausalLM
@require_torch
class TrOCRStandaloneDecoderModelTester :
'''simple docstring'''
def __init__( self , A , A=99 , A=13 , A=16 , A=7 , A=True , A=True , A=True , A=False , A=True , A=2 , A=32 , A=4 , A=4 , A=30 , A=0 , A=1 , A=2 , A=None , ) ->Optional[Any]:
UpperCAmelCase__ :str = parent
UpperCAmelCase__ :int = batch_size
UpperCAmelCase__ :Tuple = decoder_seq_length
# For common tests
UpperCAmelCase__ :Union[str, Any] = self.decoder_seq_length
UpperCAmelCase__ :int = is_training
UpperCAmelCase__ :List[Any] = use_attention_mask
UpperCAmelCase__ :Tuple = use_labels
UpperCAmelCase__ :Any = vocab_size
UpperCAmelCase__ :Dict = d_model
UpperCAmelCase__ :Union[str, Any] = d_model
UpperCAmelCase__ :str = decoder_layers
UpperCAmelCase__ :int = decoder_layers
UpperCAmelCase__ :List[Any] = decoder_ffn_dim
UpperCAmelCase__ :Any = decoder_attention_heads
UpperCAmelCase__ :Any = decoder_attention_heads
UpperCAmelCase__ :Optional[int] = eos_token_id
UpperCAmelCase__ :Optional[int] = bos_token_id
UpperCAmelCase__ :Union[str, Any] = pad_token_id
UpperCAmelCase__ :Optional[int] = decoder_start_token_id
UpperCAmelCase__ :Optional[Any] = use_cache
UpperCAmelCase__ :Tuple = max_position_embeddings
UpperCAmelCase__ :List[str] = None
UpperCAmelCase__ :int = decoder_seq_length
UpperCAmelCase__ :Optional[int] = 2
UpperCAmelCase__ :Optional[Any] = 1
def A__ ( self ) ->Tuple:
UpperCAmelCase__ :List[Any] = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size )
UpperCAmelCase__ :str = None
if self.use_attention_mask:
UpperCAmelCase__ :Tuple = ids_tensor([self.batch_size, self.decoder_seq_length] , vocab_size=2 )
UpperCAmelCase__ :List[str] = None
if self.use_labels:
UpperCAmelCase__ :Optional[Any] = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size )
UpperCAmelCase__ :Optional[int] = TrOCRConfig(
vocab_size=self.vocab_size , d_model=self.d_model , decoder_layers=self.decoder_layers , decoder_ffn_dim=self.decoder_ffn_dim , decoder_attention_heads=self.decoder_attention_heads , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , use_cache=self.use_cache , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , max_position_embeddings=self.max_position_embeddings , )
return (config, input_ids, attention_mask, lm_labels)
def A__ ( self , A , A , A , A , ) ->Union[str, Any]:
UpperCAmelCase__ :List[Any] = True
UpperCAmelCase__ :Any = TrOCRDecoder(config=A ).to(A ).eval()
UpperCAmelCase__ :Any = input_ids[:2]
input_ids[input_ids == 0] += 1
# first forward pass
UpperCAmelCase__ :Optional[int] = model(A , use_cache=A )
UpperCAmelCase__ :Optional[int] = model(A )
UpperCAmelCase__ :Optional[Any] = model(A , use_cache=A )
self.parent.assertTrue(len(A ) == len(A ) )
self.parent.assertTrue(len(A ) == len(A ) + 1 )
UpperCAmelCase__ :int = outputs['past_key_values']
# create hypothetical next token and extent to next_input_ids
UpperCAmelCase__ :Optional[Any] = ids_tensor((2, 1) , config.vocab_size - 1 ) + 1
# append to next input_ids and
UpperCAmelCase__ :Any = torch.cat([input_ids, next_tokens] , dim=-1 )
UpperCAmelCase__ :str = model(A )['last_hidden_state']
UpperCAmelCase__ :List[str] = model(A , past_key_values=A )['last_hidden_state']
# select random slice
UpperCAmelCase__ :Optional[int] = ids_tensor((1,) , output_from_past.shape[-1] ).item()
UpperCAmelCase__ :Tuple = output_from_no_past[:, next_input_ids.shape[-1] - 1, random_slice_idx].detach()
UpperCAmelCase__ :Any = output_from_past[:, 0, random_slice_idx].detach()
# test that outputs are equal for slice
assert torch.allclose(A , A , atol=1e-3 )
def A__ ( self ) ->Optional[int]:
UpperCAmelCase__ :int = self.prepare_config_and_inputs()
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ :List[Any] = config_and_inputs
UpperCAmelCase__ :Optional[Any] = {'input_ids': input_ids, 'attention_mask': attention_mask}
return config, inputs_dict
@require_torch
class TrOCRStandaloneDecoderModelTest ( ModelTesterMixin , GenerationTesterMixin , PipelineTesterMixin , unittest.TestCase):
'''simple docstring'''
__a : int = (TrOCRDecoder, TrOCRForCausalLM) if is_torch_available() else ()
__a : Optional[int] = (TrOCRForCausalLM,) if is_torch_available() else ()
__a : List[Any] = {"""text-generation""": TrOCRForCausalLM} if is_torch_available() else {}
__a : Any = True
__a : Union[str, Any] = False
    def setUp( self ) ->None:
        self.model_tester = TrOCRStandaloneDecoderModelTester(self , is_training=False )
        self.config_tester = ConfigTester(self , config_class=TrOCRConfig )
def A__ ( self ) ->Optional[Any]:
pass
def A__ ( self ) ->Any:
pass
def A__ ( self ) ->Union[str, Any]:
pass
def A__ ( self ) ->Optional[int]:
self.config_tester.run_common_tests()
def A__ ( self ) ->Optional[Any]:
UpperCAmelCase__ :Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_decoder_model_past(*A )
def A__ ( self ) ->int:
return
@unittest.skip('The model doesn\'t support left padding' ) # and it's not used enough to be worth fixing :)
def A__ ( self ) ->List[str]:
pass
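# --- Editor's note (hedged sketch; not part of the original tests) ---
# create_and_check_decoder_model_past above verifies the KV-cache contract:
# running the full sequence without a cache and running only the new token
# with past_key_values must yield matching hidden states. In user code:
#
#     out = model(input_ids, use_cache=True)
#     next_out = model(next_token, past_key_values=out.past_key_values)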
| 433
| 1
|
'''Project Euler problem 50: which prime below one million can be written as
the sum of the most consecutive primes?'''
from __future__ import annotations
def prime_sieve(limit: int) -> list[int]:
    '''Sieve of Eratosthenes: return all primes strictly below ``limit``.'''
    is_prime = [True] * limit
    is_prime[0] = False
    is_prime[1] = False
    is_prime[2] = True
    for i in range(3, int(limit**0.5 + 1), 2):
        index = i * 2
        while index < limit:
            is_prime[index] = False
            index = index + i
    primes = [2]
    for i in range(3, limit, 2):
        if is_prime[i]:
            primes.append(i)
    return primes
def solution(ceiling: int = 1_000_000) -> int:
    '''Return the prime below ``ceiling`` that is the sum of the most
    consecutive primes.'''
    primes = prime_sieve(ceiling)
    length = 0
    largest = 0
    for i in range(len(primes)):
        for j in range(i + length, len(primes)):
            sol = sum(primes[i:j])
            if sol >= ceiling:
                break
            if sol in primes:
                length = j - i
                largest = sol
    return largest
if __name__ == "__main__":
print(f"""{solution() = }""")
| 614
|
import os
from itertools import chain
from random import randrange, shuffle
import pytest
from .sol1 import PokerHand
SORTED_HANDS = (
'4S 3H 2C 7S 5H',
'9D 8H 2C 6S 7H',
'2D 6D 9D TH 7D',
'TC 8C 2S JH 6C',
'JH 8S TH AH QH',
'TS KS 5S 9S AC',
'KD 6S 9D TH AD',
'KS 8D 4D 9S 4S', # pair
'8C 4S KH JS 4D', # pair
'QH 8H KD JH 8S', # pair
'KC 4H KS 2H 8D', # pair
'KD 4S KC 3H 8S', # pair
'AH 8S AS KC JH', # pair
'3H 4C 4H 3S 2H', # 2 pairs
'5S 5D 2C KH KH', # 2 pairs
'3C KH 5D 5S KH', # 2 pairs
'AS 3C KH AD KH', # 2 pairs
'7C 7S 3S 7H 5S', # 3 of a kind
'7C 7S KH 2H 7H', # 3 of a kind
'AC KH QH AH AS', # 3 of a kind
'2H 4D 3C AS 5S', # straight (low ace)
'3C 5C 4C 2C 6H', # straight
'6S 8S 7S 5H 9H', # straight
'JS QS 9H TS KH', # straight
'QC KH TS JS AH', # straight (high ace)
'8C 9C 5C 3C TC', # flush
'3S 8S 9S 5S KS', # flush
'4C 5C 9C 8C KC', # flush
'JH 8H AH KH QH', # flush
'3D 2H 3H 2C 2D', # full house
'2H 2C 3S 3H 3D', # full house
'KH KC 3S 3H 3D', # full house
'JC 6H JS JD JH', # 4 of a kind
'JC 7H JS JD JH', # 4 of a kind
'JC KH JS JD JH', # 4 of a kind
'2S AS 4S 5S 3S', # straight flush (low ace)
'2D 6D 3D 4D 5D', # straight flush
'5C 6C 3C 7C 4C', # straight flush
'JH 9H TH KH QH', # straight flush
'JH AH TH KH QH', # royal flush (high ace straight flush)
)
TEST_COMPARE = (
('2H 3H 4H 5H 6H', 'KS AS TS QS JS', 'Loss'),
('2H 3H 4H 5H 6H', 'AS AD AC AH JD', 'Win'),
('AS AH 2H AD AC', 'JS JD JC JH 3D', 'Win'),
('2S AH 2H AS AC', 'JS JD JC JH AD', 'Loss'),
('2S AH 2H AS AC', '2H 3H 5H 6H 7H', 'Win'),
('AS 3S 4S 8S 2S', '2H 3H 5H 6H 7H', 'Win'),
('2H 3H 5H 6H 7H', '2S 3H 4H 5S 6C', 'Win'),
('2S 3H 4H 5S 6C', '3D 4C 5H 6H 2S', 'Tie'),
('2S 3H 4H 5S 6C', 'AH AC 5H 6H AS', 'Win'),
('2S 2H 4H 5S 4C', 'AH AC 5H 6H AS', 'Loss'),
('2S 2H 4H 5S 4C', 'AH AC 5H 6H 7S', 'Win'),
('6S AD 7H 4S AS', 'AH AC 5H 6H 7S', 'Loss'),
('2S AH 4H 5S KC', 'AH AC 5H 6H 7S', 'Loss'),
('2S 3H 6H 7S 9C', '7H 3C TH 6H 9S', 'Loss'),
('4S 5H 6H TS AC', '3S 5H 6H TS AC', 'Win'),
('2S AH 4H 5S 6C', 'AD 4C 5H 6H 2C', 'Tie'),
('AS AH 3H AD AC', 'AS AH 2H AD AC', 'Win'),
('AH AC 5H 5C QS', 'AH AC 5H 5C KS', 'Loss'),
('AH AC 5H 5C QS', 'KH KC 5H 5C QS', 'Win'),
('7C 7S KH 2H 7H', '3C 3S AH 2H 3H', 'Win'),
('3C 3S AH 2H 3H', '7C 7S KH 2H 7H', 'Loss'),
('6H 5H 4H 3H 2H', '5H 4H 3H 2H AH', 'Win'),
('5H 4H 3H 2H AH', '5H 4H 3H 2H AH', 'Tie'),
('5H 4H 3H 2H AH', '6H 5H 4H 3H 2H', 'Loss'),
('AH AD KS KC AC', 'AH KD KH AC KC', 'Win'),
('2H 4D 3C AS 5S', '2H 4D 3C 6S 5S', 'Loss'),
('2H 3S 3C 3H 2S', '3S 3C 2S 2H 2D', 'Win'),
('4D 6D 5D 2D JH', '3S 8S 3H TC KH', 'Loss'),
('4S 6C 8S 3S 7S', 'AD KS 2D 7D 7C', 'Loss'),
('6S 4C 7H 8C 3H', '5H JC AH 9D 9C', 'Loss'),
('9D 9H JH TC QH', '3C 2S JS 5C 7H', 'Win'),
('2H TC 8S AD 9S', '4H TS 7H 2C 5C', 'Win'),
('9D 3S 2C 7S 7C', 'JC TD 3C TC 9H', 'Loss'),
)
TEST_FLUSH = (
('2H 3H 4H 5H 6H', True),
('AS AH 2H AD AC', False),
('2H 3H 5H 6H 7H', True),
('KS AS TS QS JS', True),
('8H 9H QS JS TH', False),
('AS 3S 4S 8S 2S', True),
)
TEST_STRAIGHT = (
('2H 3H 4H 5H 6H', True),
('AS AH 2H AD AC', False),
('2H 3H 5H 6H 7H', False),
('KS AS TS QS JS', True),
('8H 9H QS JS TH', True),
)
TEST_FIVE_HIGH_STRAIGHT = (
('2H 4D 3C AS 5S', True, [5, 4, 3, 2, 1_4]),
('2H 5D 3C AS 5S', False, [1_4, 5, 5, 3, 2]),
('JH QD KC AS TS', False, [1_4, 1_3, 1_2, 1_1, 1_0]),
('9D 3S 2C 7S 7C', False, [9, 7, 7, 3, 2]),
)
TEST_KIND = (
('JH AH TH KH QH', 0),
('JH 9H TH KH QH', 0),
('JC KH JS JD JH', 7),
('KH KC 3S 3H 3D', 6),
('8C 9C 5C 3C TC', 0),
('JS QS 9H TS KH', 0),
('7C 7S KH 2H 7H', 3),
('3C KH 5D 5S KH', 2),
('QH 8H KD JH 8S', 1),
('2D 6D 9D TH 7D', 0),
)
TEST_TYPES = (
('JH AH TH KH QH', 2_3),
('JH 9H TH KH QH', 2_2),
('JC KH JS JD JH', 2_1),
('KH KC 3S 3H 3D', 2_0),
('8C 9C 5C 3C TC', 1_9),
('JS QS 9H TS KH', 1_8),
('7C 7S KH 2H 7H', 1_7),
('3C KH 5D 5S KH', 1_6),
('QH 8H KD JH 8S', 1_5),
('2D 6D 9D TH 7D', 1_4),
)
def generate_random_hand() -> tuple[str, str, str]:
    """Pick two random hands from SORTED_HANDS and derive the expected
    outcome from their positions in that ranked tuple."""
    play, oppo = randrange(len(SORTED_HANDS)), randrange(len(SORTED_HANDS))
    expected = ["Loss", "Tie", "Win"][(play >= oppo) + (play > oppo)]
    hand, other = SORTED_HANDS[play], SORTED_HANDS[oppo]
    return hand, other, expected
def generate_random_hands(number_of_hands: int = 100):
    """Yield number_of_hands random (hand, other, expected) triples."""
    return (generate_random_hand() for _ in range(number_of_hands))
@pytest.mark.parametrize('hand, expected' , TEST_FLUSH )
def test_hand_is_flush(hand, expected) -> None:
    assert PokerHand(hand)._is_flush() == expected
@pytest.mark.parametrize('hand, expected' , TEST_STRAIGHT )
def test_hand_is_straight(hand, expected) -> None:
    assert PokerHand(hand)._is_straight() == expected
@pytest.mark.parametrize('hand, expected, card_values' , TEST_FIVE_HIGH_STRAIGHT )
def test_hand_is_five_high_straight(hand, expected, card_values) -> None:
    player = PokerHand(hand)
    assert player._is_five_high_straight() == expected
    assert player._card_values == card_values
@pytest.mark.parametrize('hand, expected' , TEST_KIND )
def test_hand_is_same_kind(hand, expected) -> None:
    assert PokerHand(hand)._is_same_kind() == expected
@pytest.mark.parametrize('hand, expected' , TEST_TYPES )
def test_hand_values(hand, expected) -> None:
    assert PokerHand(hand)._hand_type == expected
@pytest.mark.parametrize('hand, other, expected' , TEST_COMPARE )
def test_compare_simple(hand, other, expected) -> None:
    assert PokerHand(hand).compare_with(PokerHand(other)) == expected
@pytest.mark.parametrize('hand, other, expected' , generate_random_hands() )
def test_compare_random(hand, other, expected) -> None:
    assert PokerHand(hand).compare_with(PokerHand(other)) == expected
def test_hand_sorted() -> None:
    poker_hands = [PokerHand(hand) for hand in SORTED_HANDS]
    list_copy = poker_hands.copy()
    shuffle(list_copy)
    user_sorted = chain(sorted(list_copy))
    for index, hand in enumerate(user_sorted):
        assert hand == poker_hands[index]
def test_custom_sort_five_high_straight() -> None:
    pokerhands = [PokerHand('2D AC 3H 4H 5S' ), PokerHand('2S 3H 4H 5S 6C' )]
    pokerhands.sort(reverse=True)
    assert pokerhands[0].__str__() == "2S 3H 4H 5S 6C"
def test_multiple_calls_five_high_straight() -> None:
    pokerhand = PokerHand('2C 4S AS 3D 5C' )
    expected = True
    expected_card_values = [5, 4, 3, 2, 14]
    for _ in range(10):
        assert pokerhand._is_five_high_straight() == expected
        assert pokerhand._card_values == expected_card_values
def test_euler_project() -> None:
    """Project Euler problem 54: count how many hands Player 1 wins."""
    answer = 0
    script_dir = os.path.abspath(os.path.dirname(__file__))
    poker_hands_path = os.path.join(script_dir, 'poker_hands.txt')
    with open(poker_hands_path) as file_hand:
        for line in file_hand:
            player_hand = line[:14].strip()
            opponent_hand = line[15:].strip()
            player, opponent = PokerHand(player_hand), PokerHand(opponent_hand)
            output = player.compare_with(opponent)
            if output == "Win":
                answer += 1
    assert answer == 376
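# --- Editor's note (not part of the original test module) ---
# These are plain pytest tests; the randomized comparison cases come from
# generate_random_hands(), which samples pairs out of SORTED_HANDS and derives
# the expected outcome from the hands' positions in that ranked tuple. Run
# them with, e.g.:
#
#     pytest -q path/to/this_test_file.py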
| 246
| 0
|
from ....configuration_utils import PretrainedConfig
from ....utils import logging
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
# TODO: upload to AWS
SCREAMING_SNAKE_CASE__ = {
"""yjernite/retribert-base-uncased""": (
"""https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/config.json"""
),
}
class RetriBertConfig( PretrainedConfig ):
    model_type = "retribert"
    def __init__( self , vocab_size=30522 , hidden_size=768 , num_hidden_layers=8 , num_attention_heads=12 , intermediate_size=3072 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1e-12 , share_encoders=True , projection_dim=128 , pad_token_id=0 , **kwargs , ) -> None:
        super().__init__(pad_token_id=pad_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.share_encoders = share_encoders
        self.projection_dim = projection_dim
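# --- Editor's usage sketch (hedged; not part of the original module) ---
# Like any PretrainedConfig subclass, the config accepts keyword overrides or
# can be loaded from the checkpoint named in the archive map above:
#
#     config = RetriBertConfig(hidden_size=256, num_hidden_layers=4)
#     assert config.model_type == "retribert"
#     # or: RetriBertConfig.from_pretrained("yjernite/retribert-base-uncased")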
| 688
|
from pathlib import Path
import numpy as np
from PIL import Image
def rgb_to_gray(rgb: np.ndarray) -> np.ndarray:
    """Convert an RGB image to grayscale using the standard luma weights."""
    r, g, b = rgb[:, :, 0], rgb[:, :, 1], rgb[:, :, 2]
    return 0.2989 * r + 0.5870 * g + 0.1140 * b
def gray_to_binary(gray: np.ndarray) -> np.ndarray:
    """Threshold a grayscale image into a boolean foreground mask."""
    return (gray > 127) & (gray <= 255)
def dilation(image: np.ndarray, kernel: np.ndarray) -> np.ndarray:
    """Morphological dilation of a binary image by the given kernel."""
    output = np.zeros_like(image)
    image_padded = np.zeros(
        (image.shape[0] + kernel.shape[0] - 1, image.shape[1] + kernel.shape[1] - 1) )
    # Copy image to padded image
    image_padded[kernel.shape[0] - 2 : -1, kernel.shape[1] - 2 : -1] = image
    # Iterate over image & apply kernel
    for x in range(image.shape[1] ):
        for y in range(image.shape[0] ):
            summation = (
                kernel * image_padded[y : y + kernel.shape[0], x : x + kernel.shape[1]]
            ).sum()
            output[y, x] = int(summation > 0 )
    return output
if __name__ == "__main__":
# read original image
SCREAMING_SNAKE_CASE__ = Path(__file__).resolve().parent / """image_data""" / """lena.jpg"""
SCREAMING_SNAKE_CASE__ = np.array(Image.open(lena_path))
# kernel to be applied
SCREAMING_SNAKE_CASE__ = np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]])
SCREAMING_SNAKE_CASE__ = dilation(gray_to_binary(rgb_to_gray(lena)), structuring_element)
# Save the output image
SCREAMING_SNAKE_CASE__ = Image.fromarray(output).convert("""RGB""")
pil_img.save("""result_dilation.png""")
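# --- Editor's worked example (not part of the original module) ---
# Dilation switches on any pixel whose kernel-shaped neighborhood contains at
# least one foreground pixel, so a single white pixel grows into the shape of
# the structuring element:
#
#     img = np.array([[0, 0, 0], [0, 1, 0], [0, 0, 0]])
#     dilation(img, np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]]))
#     # -> [[0, 1, 0],
#     #     [1, 1, 1],
#     #     [0, 1, 0]]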
| 688
| 1
|
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarily `from diffusers.pipelines import DiffusionPipeline` works
from ...utils import deprecate
from ..controlnet.pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline # noqa: F401
deprecate(
"stable diffusion controlnet",
"0.22.0",
"Importing `FlaxStableDiffusionControlNetPipeline` from diffusers.pipelines.stable_diffusion.flax_pipeline_stable_diffusion_controlnet is deprecated. Please import `from diffusers import FlaxStableDiffusionControlNetPipeline` instead.",
standard_warn=False,
stacklevel=3,
)
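# --- Editor's note ---
# The deprecation message above already names the replacement import;
# downstream code should migrate to:
#
#     from diffusers import FlaxStableDiffusionControlNetPipeline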
| 518
|
import builtins
import sys
from ...utils.imports import _is_package_available
from . import cursor, input
from .helpers import Direction, clear_line, forceWrite, linebreak, move_cursor, reset_cursor, writeColor
from .keymap import KEYMAP
in_colab = False
try:
    in_colab = _is_package_available("google.colab")
except ModuleNotFoundError:
pass
@input.register
class BulletMenu :
    """A CLI menu to select a choice from a list of choices using the keyboard."""
    def __init__( self , prompt = None , choices = [] ):
        self.position = 0
        self.choices = choices
        self.prompt = prompt
        if sys.platform == "win32":
            self.arrow_char = """*"""
        else:
            self.arrow_char = """➔ """
    def write_choice( self , index , end_string = "" ):
        if sys.platform != "win32":
            writeColor(self.choices[index] , 32 , end_string )
        else:
            forceWrite(self.choices[index] , end_string )
    def print_choice( self , index ):
        if index == self.position:
            forceWrite(F" {self.arrow_char} " )
            self.write_choice(index )
        else:
            forceWrite(F"    {self.choices[index]}" )
        reset_cursor()
    def move_direction( self , direction , num_spaces = 1 ):
        old_position = self.position
        if direction == Direction.DOWN:
            if self.position + 1 >= len(self.choices ):
                return
            self.position += num_spaces
        else:
            if self.position - 1 < 0:
                return
            self.position -= num_spaces
        clear_line()
        self.print_choice(old_position )
        move_cursor(num_spaces , direction.name )
        self.print_choice(self.position )
    @input.mark(KEYMAP["""up"""] )
    def move_up( self ):
        self.move_direction(Direction.UP )
    @input.mark(KEYMAP["""down"""] )
    def move_down( self ):
        self.move_direction(Direction.DOWN )
    @input.mark(KEYMAP["""newline"""] )
    def select( self ):
        move_cursor(len(self.choices ) - self.position , """DOWN""" )
        return self.position
    @input.mark(KEYMAP["""interrupt"""] )
    def interrupt( self ):
        move_cursor(len(self.choices ) - self.position , """DOWN""" )
        raise KeyboardInterrupt
    @input.mark_multiple(*[KEYMAP[str(number )] for number in range(10 )] )
    def select_row( self ):
        index = int(chr(self.current_selection ) )
        movement = index - self.position
        if index == self.position:
            return
        if index < len(self.choices ):
            if self.position > index:
                self.move_direction(Direction.UP , -movement )
            elif self.position < index:
                self.move_direction(Direction.DOWN , movement )
            else:
                return
        else:
            return
    def run( self , default_choice = 0 ):
        if self.prompt:
            linebreak()
            forceWrite(self.prompt , """\n""" )
            if in_colab:
                forceWrite("""Please input a choice index (starting from 0), and press enter""" , """\n""" )
            else:
                forceWrite("""Please select a choice using the arrow or number keys, and selecting with enter""" , """\n""" )
        self.position = default_choice
        for i in range(len(self.choices ) ):
            self.print_choice(i )
            forceWrite("""\n""" )
        move_cursor(len(self.choices ) - self.position , """UP""" )
        with cursor.hide():
            while True:
                if in_colab:
                    try:
                        choice = int(builtins.input() )
                    except ValueError:
                        choice = default_choice
                else:
                    choice = self.handle_input()
                if choice is not None:
                    reset_cursor()
                    for _ in range(len(self.choices ) + 1 ):
                        move_cursor(1 , """UP""" )
                        clear_line()
                    self.write_choice(choice , """\n""" )
                    return choice
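# --- Editor's usage sketch (hedged; not part of the original module) ---
# A menu is constructed with a prompt and a list of choices; run() blocks
# until the user confirms an entry and returns its zero-based index:
#
#     menu = BulletMenu(
#         "Which compute environment are you using?",
#         ["This machine", "AWS (Amazon SageMaker)"],
#     )
#     selected = menu.run(default_choice=0)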
| 518
| 1
|
'''simple docstring'''
import os
from glob import glob
import imageio
import torch
import torchvision
import wandb
from img_processing import custom_to_pil, loop_post_process, preprocess, preprocess_vqgan
from loaders import load_vqgan
from PIL import Image
from torch import nn
from transformers import CLIPModel, CLIPTokenizerFast
from utils import get_device, get_timestamp, show_pil
class ProcessorGradientFlow :
"""simple docstring"""
def __init__( self : str , lowerCamelCase : str = "cpu" , lowerCamelCase : str = "openai/clip-vit-large-patch14" ) -> None:
lowerCAmelCase_ : int = device
lowerCAmelCase_ : Optional[Any] = CLIPTokenizerFast.from_pretrained(__lowerCamelCase )
lowerCAmelCase_ : Optional[int] = [0.48_145_466, 0.4_578_275, 0.40_821_073]
lowerCAmelCase_ : Tuple = [0.26_862_954, 0.26_130_258, 0.27_577_711]
lowerCAmelCase_ : List[Any] = torchvision.transforms.Normalize(self.image_mean , self.image_std )
lowerCAmelCase_ : int = torchvision.transforms.Resize(2_24 )
lowerCAmelCase_ : Optional[int] = torchvision.transforms.CenterCrop(2_24 )
def __lowercase ( self : Union[str, Any] , lowerCamelCase : Optional[int] ) -> str:
lowerCAmelCase_ : Tuple = self.resize(__lowerCamelCase )
lowerCAmelCase_ : Dict = self.center_crop(__lowerCamelCase )
lowerCAmelCase_ : Any = self.normalize(__lowerCamelCase )
return images
def __call__( self : List[str] , lowerCamelCase : int=None , lowerCamelCase : int=None , **lowerCamelCase : Optional[int] ) -> Union[str, Any]:
lowerCAmelCase_ : Any = self.tokenizer(text=__lowerCamelCase , **__lowerCamelCase )
lowerCAmelCase_ : Optional[Any] = self.preprocess_img(__lowerCamelCase )
lowerCAmelCase_ : str = {key: value.to(self.device ) for (key, value) in encoding.items()}
return encoding
class VQGAN_CLIP( nn.Module):
"""simple docstring"""
def __init__( self : Optional[Any] , lowerCamelCase : List[str]=10 , lowerCamelCase : Tuple=0.01 , lowerCamelCase : Dict=None , lowerCamelCase : List[Any]=None , lowerCamelCase : str=None , lowerCamelCase : str=None , lowerCamelCase : List[str]=None , lowerCamelCase : int=None , lowerCamelCase : Dict=False , lowerCamelCase : Optional[Any]=True , lowerCamelCase : str="image" , lowerCamelCase : Optional[Any]=True , lowerCamelCase : List[str]=False , lowerCamelCase : str=False , lowerCamelCase : List[str]=False , ) -> None:
super().__init__()
lowerCAmelCase_ : Optional[Any] = None
lowerCAmelCase_ : str = device if device else get_device()
if vqgan:
lowerCAmelCase_ : List[str] = vqgan
else:
lowerCAmelCase_ : Dict = load_vqgan(self.device , conf_path=__lowerCamelCase , ckpt_path=__lowerCamelCase )
self.vqgan.eval()
if clip:
lowerCAmelCase_ : Optional[Any] = clip
else:
lowerCAmelCase_ : Dict = CLIPModel.from_pretrained("""openai/clip-vit-base-patch32""" )
self.clip.to(self.device )
lowerCAmelCase_ : Optional[Any] = ProcessorGradientFlow(device=self.device )
lowerCAmelCase_ : Dict = iterations
lowerCAmelCase_ : Tuple = lr
lowerCAmelCase_ : Tuple = log
lowerCAmelCase_ : Optional[int] = make_grid
lowerCAmelCase_ : str = return_val
lowerCAmelCase_ : List[Any] = quantize
lowerCAmelCase_ : str = self.vqgan.decoder.z_shape
def __lowercase ( self : int , lowerCamelCase : Optional[Any]=None , lowerCamelCase : Dict=None , lowerCamelCase : int=5 , lowerCamelCase : str=True ) -> Union[str, Any]:
lowerCAmelCase_ : Optional[Any] = []
if output_path is None:
lowerCAmelCase_ : List[Any] = "./animation.gif"
if input_path is None:
lowerCAmelCase_ : str = self.save_path
lowerCAmelCase_ : Optional[int] = sorted(glob(input_path + """/*""" ) )
if not len(__lowerCamelCase ):
raise ValueError(
"""No images found in save path, aborting (did you pass save_intermediate=True to the generate"""
""" function?)""" )
if len(__lowerCamelCase ) == 1:
print("""Only one image found in save path, (did you pass save_intermediate=True to the generate function?)""" )
lowerCAmelCase_ : Union[str, Any] = total_duration / len(__lowerCamelCase )
lowerCAmelCase_ : Optional[Any] = [frame_duration] * len(__lowerCamelCase )
if extend_frames:
lowerCAmelCase_ : int = 1.5
lowerCAmelCase_ : int = 3
for file_name in paths:
if file_name.endswith(""".png""" ):
images.append(imageio.imread(__lowerCamelCase ) )
imageio.mimsave(__lowerCamelCase , __lowerCamelCase , duration=__lowerCamelCase )
print(F'gif saved to {output_path}' )
def __lowercase ( self : int , lowerCamelCase : List[str]=None , lowerCamelCase : Dict=None ) -> Union[str, Any]:
if not (path or img):
raise ValueError("""Input either path or tensor""" )
if img is not None:
raise NotImplementedError
lowerCAmelCase_ : List[str] = preprocess(Image.open(__lowerCamelCase ) , target_image_size=2_56 ).to(self.device )
lowerCAmelCase_ : str = preprocess_vqgan(__lowerCamelCase )
lowerCAmelCase_ : Optional[Any] = self.vqgan.encode(__lowerCamelCase )
return z
def __lowercase ( self : Dict , lowerCamelCase : Optional[Any] ) -> Union[str, Any]:
lowerCAmelCase_ : int = self.latent.detach().requires_grad_()
lowerCAmelCase_ : List[str] = base_latent + transform_vector
if self.quantize:
lowerCAmelCase_ : Union[str, Any] = self.vqgan.quantize(__lowerCamelCase )
else:
lowerCAmelCase_ : str = trans_latent
return self.vqgan.decode(__lowerCamelCase )
def __lowercase ( self : List[str] , lowerCamelCase : List[Any] , lowerCamelCase : Optional[Any] , lowerCamelCase : List[Any]=None ) -> Dict:
lowerCAmelCase_ : Union[str, Any] = self.clip_preprocessor(text=__lowerCamelCase , images=__lowerCamelCase , return_tensors="""pt""" , padding=__lowerCamelCase )
lowerCAmelCase_ : int = self.clip(**__lowerCamelCase )
lowerCAmelCase_ : Optional[int] = clip_outputs.logits_per_image
if weights is not None:
lowerCAmelCase_ : Any = similarity_logits * weights
return similarity_logits.sum()
def __lowercase ( self : Union[str, Any] , lowerCamelCase : Any , lowerCamelCase : Optional[Any] , lowerCamelCase : Any ) -> str:
lowerCAmelCase_ : Any = self._get_clip_similarity(pos_prompts["""prompts"""] , __lowerCamelCase , weights=(1 / pos_prompts["""weights"""]) )
if neg_prompts:
lowerCAmelCase_ : List[Any] = self._get_clip_similarity(neg_prompts["""prompts"""] , __lowerCamelCase , weights=neg_prompts["""weights"""] )
else:
lowerCAmelCase_ : Tuple = torch.tensor([1] , device=self.device )
lowerCAmelCase_ : List[Any] = -torch.log(__lowerCamelCase ) + torch.log(__lowerCamelCase )
return loss
def __lowercase ( self : Any , lowerCamelCase : str , lowerCamelCase : Optional[int] , lowerCamelCase : Dict ) -> str:
lowerCAmelCase_ : Any = torch.randn_like(self.latent , requires_grad=__lowerCamelCase , device=self.device )
lowerCAmelCase_ : Optional[int] = torch.optim.Adam([vector] , lr=self.lr )
for i in range(self.iterations ):
optim.zero_grad()
lowerCAmelCase_ : List[str] = self._add_vector(__lowerCamelCase )
lowerCAmelCase_ : List[Any] = loop_post_process(__lowerCamelCase )
lowerCAmelCase_ : Any = self._get_CLIP_loss(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
print("""CLIP loss""" , __lowerCamelCase )
if self.log:
wandb.log({"""CLIP Loss""": clip_loss} )
clip_loss.backward(retain_graph=__lowerCamelCase )
optim.step()
if self.return_val == "image":
yield custom_to_pil(transformed_img[0] )
else:
yield vector
def __lowercase ( self : Optional[Any] , lowerCamelCase : Any , lowerCamelCase : Optional[int] , lowerCamelCase : Union[str, Any] ) -> Optional[Any]:
wandb.init(reinit=__lowerCamelCase , project="""face-editor""" )
wandb.config.update({"""Positive Prompts""": positive_prompts} )
wandb.config.update({"""Negative Prompts""": negative_prompts} )
wandb.config.update({"""lr""": self.lr, """iterations""": self.iterations} )
if image_path:
lowerCAmelCase_ : List[str] = Image.open(__lowerCamelCase )
lowerCAmelCase_ : Optional[Any] = image.resize((2_56, 2_56) )
wandb.log("""Original Image""" , wandb.Image(__lowerCamelCase ) )
def __lowercase ( self : Optional[int] , lowerCamelCase : Optional[Any] ) -> List[Any]:
if not prompts:
return []
lowerCAmelCase_ : Optional[Any] = []
lowerCAmelCase_ : int = []
if isinstance(__lowerCamelCase , __lowerCamelCase ):
lowerCAmelCase_ : int = [prompt.strip() for prompt in prompts.split("""|""" )]
for prompt in prompts:
if isinstance(__lowerCamelCase , (tuple, list) ):
lowerCAmelCase_ : List[Any] = prompt[0]
lowerCAmelCase_ : Optional[Any] = float(prompt[1] )
elif ":" in prompt:
lowerCAmelCase_ : Optional[Any] = prompt.split(""":""" )
lowerCAmelCase_ : Union[str, Any] = float(__lowerCamelCase )
else:
lowerCAmelCase_ : Tuple = prompt
lowerCAmelCase_ : Optional[int] = 1.0
processed_prompts.append(__lowerCamelCase )
weights.append(__lowerCamelCase )
return {
"prompts": processed_prompts,
"weights": torch.tensor(__lowerCamelCase , device=self.device ),
}
def __lowercase ( self : Tuple , lowerCamelCase : List[Any] , lowerCamelCase : List[Any]=None , lowerCamelCase : Optional[Any]=None , lowerCamelCase : Union[str, Any]=True , lowerCamelCase : List[Any]=False , lowerCamelCase : int=True , lowerCamelCase : List[str]=True , lowerCamelCase : Optional[int]=None , ) -> Optional[int]:
if image_path:
lowerCAmelCase_ : str = self._get_latent(__lowerCamelCase )
else:
lowerCAmelCase_ : List[Any] = torch.randn(self.latent_dim , device=self.device )
if self.log:
self._init_logging(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
assert pos_prompts, "You must provide at least one positive prompt."
lowerCAmelCase_ : List[str] = self.process_prompts(__lowerCamelCase )
lowerCAmelCase_ : Dict = self.process_prompts(__lowerCamelCase )
if save_final and save_path is None:
lowerCAmelCase_ : Any = os.path.join("""./outputs/""" , """_""".join(pos_prompts["""prompts"""] ) )
if not os.path.exists(__lowerCamelCase ):
os.makedirs(__lowerCamelCase )
else:
lowerCAmelCase_ : List[Any] = save_path + "_" + get_timestamp()
os.makedirs(__lowerCamelCase )
lowerCAmelCase_ : Dict = save_path
lowerCAmelCase_ : List[str] = self.vqgan.decode(self.latent )[0]
if show_intermediate:
print("""Original Image""" )
show_pil(custom_to_pil(__lowerCamelCase ) )
lowerCAmelCase_ : Union[str, Any] = loop_post_process(__lowerCamelCase )
for iter, transformed_img in enumerate(self._optimize_CLIP(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) ):
if show_intermediate:
show_pil(__lowerCamelCase )
if save_intermediate:
transformed_img.save(os.path.join(self.save_path , F'iter_{iter:03d}.png' ) )
if self.log:
wandb.log({"""Image""": wandb.Image(__lowerCamelCase )} )
if show_final:
show_pil(__lowerCamelCase )
if save_final:
transformed_img.save(os.path.join(self.save_path , F'iter_{iter:03d}_final.png' ) )
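# --- Editor's usage sketch (hedged; not part of the original module) ---
# A sketch of typical use, assuming the upstream research project's public
# method name generate(): it starts from an image latent (or random noise)
# and optimizes it against CLIP. Prompts may carry weights in the
# "text:weight" form parsed by the prompt-processing method above:
#
#     editor = VQGAN_CLIP(iterations=10, lr=0.01)
#     editor.generate(
#         pos_prompts="a smiling face:1.0|bright eyes:0.5",
#         image_path="face.png",
#     )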
| 717
|
'''Project Euler problem 115: find the least row length n for which the number
of ways to fill the row with blocks of minimum length 50 first exceeds one
million.'''
from itertools import count
def solution(min_block_length: int = 50) -> int:
    '''Grow n from min_block_length, counting fill combinations, until the
    count exceeds one million; return that n.'''
    fill_count_functions = [1] * min_block_length
    for n in count(min_block_length):
        fill_count_functions.append(1)
        for block_length in range(min_block_length, n + 1):
            for block_start in range(n - block_length):
                fill_count_functions[n] += fill_count_functions[
                    n - block_start - block_length - 1
                ]
            fill_count_functions[n] += 1
        if fill_count_functions[n] > 1_000_000:
            break
    return n
if __name__ == "__main__":
print(F'''{solution() = }''')
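# --- Editor's sanity check (not part of the original solution) ---
# Project Euler 115 states that for a minimum block length of 3 the count
# first exceeds one million at n = 30, and the published answer for a minimum
# block length of 50 is n = 168:
#
#     solution(min_block_length=3)   # -> 30
#     solution(min_block_length=50)  # -> 168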
| 398
| 0
|
'''simple docstring'''
# Lint as: python3
import os
import re
import urllib.parse
from pathlib import Path
from typing import Callable, List, Optional, Union
from zipfile import ZipFile
from ..utils.file_utils import cached_path, hf_github_url
from ..utils.logging import get_logger
from ..utils.version import Version
UpperCAmelCase_ = get_logger(__name__)
class MockDownloadManager :
    dummy_data_name = "dummy_data"
    datasets_scripts_dir = "datasets"
    is_streaming = False
def __init__( self : Any , _UpperCAmelCase : str , _UpperCAmelCase : str , _UpperCAmelCase : Union[Version, str] , _UpperCAmelCase : Optional[str] = None , _UpperCAmelCase : bool = False , _UpperCAmelCase : bool = True , _UpperCAmelCase : Optional[List[Callable]] = None , ):
"""simple docstring"""
UpperCAmelCase__ = 0
UpperCAmelCase__ = dataset_name
UpperCAmelCase__ = cache_dir
UpperCAmelCase__ = use_local_dummy_data
UpperCAmelCase__ = config
# download_callbacks take a single url as input
UpperCAmelCase__ = download_callbacks or []
# if False, it doesn't load existing files and it returns the paths of the dummy files relative
# to the dummy_data zip file root
UpperCAmelCase__ = load_existing_dummy_data
# TODO(PVP, QL) might need to make this more general
UpperCAmelCase__ = str(_UpperCAmelCase )
# to be downloaded
UpperCAmelCase__ = None
UpperCAmelCase__ = None
@property
def SCREAMING_SNAKE_CASE__ ( self : List[str] ):
"""simple docstring"""
if self._dummy_file is None:
UpperCAmelCase__ = self.download_dummy_data()
return self._dummy_file
@property
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ):
"""simple docstring"""
if self.config is not None:
# structure is dummy / config_name / version_name
return os.path.join("""dummy""" , self.config.name , self.version_name )
# structure is dummy / version_name
return os.path.join("""dummy""" , self.version_name )
@property
def SCREAMING_SNAKE_CASE__ ( self : Any ):
"""simple docstring"""
return os.path.join(self.dummy_data_folder , """dummy_data.zip""" )
def SCREAMING_SNAKE_CASE__ ( self : Dict ):
"""simple docstring"""
UpperCAmelCase__ = (
self.local_path_to_dummy_data if self.use_local_dummy_data is True else self.github_path_to_dummy_data
)
UpperCAmelCase__ = cached_path(
_UpperCAmelCase , cache_dir=self.cache_dir , extract_compressed_file=_UpperCAmelCase , force_extract=_UpperCAmelCase )
return os.path.join(_UpperCAmelCase , self.dummy_file_name )
@property
def SCREAMING_SNAKE_CASE__ ( self : str ):
"""simple docstring"""
return os.path.join(self.datasets_scripts_dir , self.dataset_name , self.dummy_zip_file )
@property
def SCREAMING_SNAKE_CASE__ ( self : List[str] ):
"""simple docstring"""
if self._bucket_url is None:
UpperCAmelCase__ = hf_github_url(self.dataset_name , self.dummy_zip_file.replace(os.sep , """/""" ) )
return self._bucket_url
@property
def SCREAMING_SNAKE_CASE__ ( self : Dict ):
"""simple docstring"""
if os.path.isdir(self.dummy_file ):
return self.dummy_file
# else cut off path to file -> example `xsum`.
return "/".join(self.dummy_file.replace(os.sep , """/""" ).split("""/""" )[:-1] )
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] , _UpperCAmelCase : Any , *_UpperCAmelCase : List[Any] ):
"""simple docstring"""
if self.load_existing_dummy_data:
# dummy data is downloaded and tested
UpperCAmelCase__ = self.dummy_file
else:
# dummy data cannot be downloaded and only the path to dummy file is returned
UpperCAmelCase__ = self.dummy_file_name
# special case when data_url is a dict
if isinstance(_UpperCAmelCase , _UpperCAmelCase ):
return self.create_dummy_data_dict(_UpperCAmelCase , _UpperCAmelCase )
elif isinstance(_UpperCAmelCase , (list, tuple) ):
return self.create_dummy_data_list(_UpperCAmelCase , _UpperCAmelCase )
else:
return self.create_dummy_data_single(_UpperCAmelCase , _UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self : List[Any] , _UpperCAmelCase : str , *_UpperCAmelCase : Union[str, Any] ):
"""simple docstring"""
return self.download_and_extract(_UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self : Dict , _UpperCAmelCase : Tuple , _UpperCAmelCase : Optional[int] ):
"""simple docstring"""
return self.download_and_extract(_UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] , _UpperCAmelCase : int , *_UpperCAmelCase : Dict , **_UpperCAmelCase : Tuple ):
"""simple docstring"""
return path
def SCREAMING_SNAKE_CASE__ ( self : int ):
"""simple docstring"""
return {}
    def create_dummy_data_dict(self, path_to_dummy_data, data_url):
        dummy_data_dict = {}
        for key, single_urls in data_url.items():
            for download_callback in self.download_callbacks:
                if isinstance(single_urls, list):
                    for single_url in single_urls:
                        download_callback(single_url)
                else:
                    single_url = single_urls
                    download_callback(single_url)
            # we force the name of each key to be the last file / folder name of the url path
            # if the url has arguments, we need to encode them with urllib.parse.quote_plus
            if isinstance(single_urls, list):
                value = [os.path.join(path_to_dummy_data, urllib.parse.quote_plus(Path(x).name)) for x in single_urls]
            else:
                single_url = single_urls
                value = os.path.join(path_to_dummy_data, urllib.parse.quote_plus(Path(single_url).name))
            dummy_data_dict[key] = value

        # make sure that values are unique
        if all(isinstance(i, str) for i in dummy_data_dict.values()) and len(set(dummy_data_dict.values())) < len(
            dummy_data_dict.values()
        ):
            # append key to value to make its name unique
            dummy_data_dict = {key: value + key for key, value in dummy_data_dict.items()}

        return dummy_data_dict

    def create_dummy_data_list(self, path_to_dummy_data, data_url):
        dummy_data_list = []
        # trick: if there are many shards named like `data.txt-000001-of-00300`, only use the first one
        is_tf_records = all(bool(re.findall("[0-9]{3,}-of-[0-9]{3,}", url)) for url in data_url)
        is_pubmed_records = all(
            url.startswith("https://ftp.ncbi.nlm.nih.gov/pubmed/baseline/pubmed") for url in data_url
        )
        if data_url and (is_tf_records or is_pubmed_records):
            data_url = [data_url[0]] * len(data_url)
        for single_url in data_url:
            for download_callback in self.download_callbacks:
                download_callback(single_url)
            # we force the name of each key to be the last file / folder name of the url path
            # if the url has arguments, we need to encode them with urllib.parse.quote_plus
            value = os.path.join(path_to_dummy_data, urllib.parse.quote_plus(single_url.split("/")[-1]))
            dummy_data_list.append(value)
        return dummy_data_list

    def create_dummy_data_single(self, path_to_dummy_data, data_url):
        for download_callback in self.download_callbacks:
            download_callback(data_url)
        # we force the name of each key to be the last file / folder name of the url path
        # if the url has arguments, we need to encode them with urllib.parse.quote_plus
        value = os.path.join(path_to_dummy_data, urllib.parse.quote_plus(data_url.split("/")[-1]))
        if os.path.exists(value) or not self.load_existing_dummy_data:
            return value
        else:
            # Backward compatibility, maybe deprecate at one point.
            # For many datasets with single url calls to dl_manager.download_and_extract,
            # the dummy_data.zip file is actually the zipped downloaded file
            # while now we expected the dummy_data.zip file to be a directory containing
            # the downloaded file.
            return path_to_dummy_data
    def delete_extracted_files(self):
        pass

    def manage_extracted_files(self):
        pass

    def iter_archive(self, path):
        def _iter_archive_members(path):
            # this preserves the order of the members inside the ZIP archive
            dummy_parent_path = Path(self.dummy_file).parent
            relative_path = path.relative_to(dummy_parent_path)
            with ZipFile(self.local_path_to_dummy_data) as zip_file:
                members = zip_file.namelist()
            for member in members:
                if member.startswith(relative_path.as_posix()):
                    yield dummy_parent_path.joinpath(member)

        path = Path(path)
        file_paths = _iter_archive_members(path) if self.use_local_dummy_data else path.rglob("*")
        for file_path in file_paths:
            if file_path.is_file() and not file_path.name.startswith((".", "__")):
                yield file_path.relative_to(path).as_posix(), file_path.open("rb")

    def iter_files(self, paths):
        if not isinstance(paths, list):
            paths = [paths]
        for path in paths:
            if os.path.isfile(path):
                if os.path.basename(path).startswith((".", "__")):
                    return
                yield path
            else:
                for dirpath, dirnames, filenames in os.walk(path):
                    if os.path.basename(dirpath).startswith((".", "__")):
                        continue
                    dirnames.sort()
                    for filename in sorted(filenames):
                        if filename.startswith((".", "__")):
                            continue
                        yield os.path.join(dirpath, filename)
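

# --- Illustrative usage sketch (not part of the original module) ---
# A minimal sketch of how a dataset test might drive the mock manager above.
# The class name `MockDownloadManager` and the constructor arguments are
# assumptions here, since the class header sits earlier in the file:
#
#   dl_manager = MockDownloadManager("xsum", config=None, version=Version("1.0.0"))
#   paths = dl_manager.download_and_extract({"train": "https://host/train.zip"})
#   # -> {"train": "<extracted dummy_data>/train.zip"} resolved inside dummy_data.zip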
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
_import_structure = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_bartpho'] = ['BartphoTokenizer']
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bartpho import BartphoTokenizer
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
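
# --- Illustrative note (not part of the original module) ---
# The TYPE_CHECKING branch gives static analyzers real imports, while runtime
# consumers go through the lazy module, which only imports the sentencepiece-backed
# tokenizer on first attribute access. A rough consumer-side sketch (the package
# path is assumed):
#
#   from transformers.models.bartpho import BartphoTokenizer  # resolved lazily
#   tokenizer = BartphoTokenizer.from_pretrained("vinai/bartpho-syllable")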
"""simple docstring"""
import argparse
import os
import jax as jnp
import numpy as onp
import torch
import torch.nn as nn
from music_spectrogram_diffusion import inference
from tax import checkpoints
from diffusers import DDPMScheduler, OnnxRuntimeModel, SpectrogramDiffusionPipeline
from diffusers.pipelines.spectrogram_diffusion import SpectrogramContEncoder, SpectrogramNotesEncoder, TaFilmDecoder
_a : int= "base_with_context"
def load_notes_encoder(weights, model):
__snake_case : Optional[Any] = nn.Parameter(torch.FloatTensor(weights['token_embedder']['embedding'] ) )
__snake_case : Optional[int] = nn.Parameter(
torch.FloatTensor(weights['Embed_0']['embedding'] ) , requires_grad=UpperCAmelCase_ )
for lyr_num, lyr in enumerate(model.encoders ):
        ly_weight = weights[F"layers_{lyr_num}"]
__snake_case : Any = nn.Parameter(
torch.FloatTensor(ly_weight['pre_attention_layer_norm']['scale'] ) )
        attention_weights = ly_weight['attention']
__snake_case : int = nn.Parameter(torch.FloatTensor(attention_weights['query']['kernel'].T ) )
__snake_case : Any = nn.Parameter(torch.FloatTensor(attention_weights['key']['kernel'].T ) )
__snake_case : List[Any] = nn.Parameter(torch.FloatTensor(attention_weights['value']['kernel'].T ) )
__snake_case : Tuple = nn.Parameter(torch.FloatTensor(attention_weights['out']['kernel'].T ) )
__snake_case : int = nn.Parameter(torch.FloatTensor(ly_weight['pre_mlp_layer_norm']['scale'] ) )
__snake_case : Any = nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wi_0']['kernel'].T ) )
__snake_case : Optional[Any] = nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wi_1']['kernel'].T ) )
__snake_case : Any = nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wo']['kernel'].T ) )
__snake_case : Dict = nn.Parameter(torch.FloatTensor(weights['encoder_norm']['scale'] ) )
return model
def load_continuous_encoder(weights, model):
__snake_case : Union[str, Any] = nn.Parameter(torch.FloatTensor(weights['input_proj']['kernel'].T ) )
__snake_case : List[Any] = nn.Parameter(
torch.FloatTensor(weights['Embed_0']['embedding'] ) , requires_grad=UpperCAmelCase_ )
for lyr_num, lyr in enumerate(model.encoders ):
        ly_weight = weights[F"layers_{lyr_num}"]
        attention_weights = ly_weight['attention']
__snake_case : Optional[int] = nn.Parameter(torch.FloatTensor(attention_weights['query']['kernel'].T ) )
__snake_case : Union[str, Any] = nn.Parameter(torch.FloatTensor(attention_weights['key']['kernel'].T ) )
__snake_case : Any = nn.Parameter(torch.FloatTensor(attention_weights['value']['kernel'].T ) )
__snake_case : List[str] = nn.Parameter(torch.FloatTensor(attention_weights['out']['kernel'].T ) )
__snake_case : List[str] = nn.Parameter(
torch.FloatTensor(ly_weight['pre_attention_layer_norm']['scale'] ) )
__snake_case : List[Any] = nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wi_0']['kernel'].T ) )
__snake_case : str = nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wi_1']['kernel'].T ) )
__snake_case : Tuple = nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wo']['kernel'].T ) )
__snake_case : str = nn.Parameter(torch.FloatTensor(ly_weight['pre_mlp_layer_norm']['scale'] ) )
__snake_case : List[str] = nn.Parameter(torch.FloatTensor(weights['encoder_norm']['scale'] ) )
return model
def load_decoder(weights, model):
__snake_case : List[Any] = nn.Parameter(torch.FloatTensor(weights['time_emb_dense0']['kernel'].T ) )
__snake_case : str = nn.Parameter(torch.FloatTensor(weights['time_emb_dense1']['kernel'].T ) )
__snake_case : List[str] = nn.Parameter(
torch.FloatTensor(weights['Embed_0']['embedding'] ) , requires_grad=UpperCAmelCase_ )
__snake_case : List[str] = nn.Parameter(
torch.FloatTensor(weights['continuous_inputs_projection']['kernel'].T ) )
for lyr_num, lyr in enumerate(model.decoders ):
        ly_weight = weights[F"layers_{lyr_num}"]
__snake_case : str = nn.Parameter(
torch.FloatTensor(ly_weight['pre_self_attention_layer_norm']['scale'] ) )
__snake_case : Optional[int] = nn.Parameter(
torch.FloatTensor(ly_weight['FiLMLayer_0']['DenseGeneral_0']['kernel'].T ) )
        attention_weights = ly_weight['self_attention']
__snake_case : Any = nn.Parameter(torch.FloatTensor(attention_weights['query']['kernel'].T ) )
__snake_case : Dict = nn.Parameter(torch.FloatTensor(attention_weights['key']['kernel'].T ) )
__snake_case : Optional[Any] = nn.Parameter(torch.FloatTensor(attention_weights['value']['kernel'].T ) )
__snake_case : Tuple = nn.Parameter(torch.FloatTensor(attention_weights['out']['kernel'].T ) )
        attention_weights = ly_weight['MultiHeadDotProductAttention_0']
__snake_case : Tuple = nn.Parameter(torch.FloatTensor(attention_weights['query']['kernel'].T ) )
__snake_case : Union[str, Any] = nn.Parameter(torch.FloatTensor(attention_weights['key']['kernel'].T ) )
__snake_case : Optional[Any] = nn.Parameter(torch.FloatTensor(attention_weights['value']['kernel'].T ) )
__snake_case : Optional[Any] = nn.Parameter(torch.FloatTensor(attention_weights['out']['kernel'].T ) )
__snake_case : Union[str, Any] = nn.Parameter(
torch.FloatTensor(ly_weight['pre_cross_attention_layer_norm']['scale'] ) )
__snake_case : Dict = nn.Parameter(torch.FloatTensor(ly_weight['pre_mlp_layer_norm']['scale'] ) )
__snake_case : Dict = nn.Parameter(
torch.FloatTensor(ly_weight['FiLMLayer_1']['DenseGeneral_0']['kernel'].T ) )
__snake_case : int = nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wi_0']['kernel'].T ) )
__snake_case : Tuple = nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wi_1']['kernel'].T ) )
__snake_case : Any = nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wo']['kernel'].T ) )
__snake_case : Optional[int] = nn.Parameter(torch.FloatTensor(weights['decoder_norm']['scale'] ) )
__snake_case : Dict = nn.Parameter(torch.FloatTensor(weights['spec_out_dense']['kernel'].T ) )
return model
def main(args):
    t5_checkpoint = checkpoints.load_t5x_checkpoint(args.checkpoint_path)
    t5_checkpoint = jnp.tree_util.tree_map(onp.array, t5_checkpoint)
    gin_overrides = [
        'from __gin__ import dynamic_registration',
        'from music_spectrogram_diffusion.models.diffusion import diffusion_utils',
        'diffusion_utils.ClassifierFreeGuidanceConfig.eval_condition_weight = 2.0',
        'diffusion_utils.DiffusionConfig.classifier_free_guidance = @diffusion_utils.ClassifierFreeGuidanceConfig()',
    ]
    gin_file = os.path.join(args.checkpoint_path, '..', 'config.gin')
    gin_config = inference.parse_training_gin_file(gin_file, gin_overrides)
    synth_model = inference.InferenceModel(args.checkpoint_path, gin_config)
    scheduler = DDPMScheduler(beta_schedule='squaredcos_cap_v2', variance_type='fixed_large')
    notes_encoder = SpectrogramNotesEncoder(
        max_length=synth_model.sequence_length['inputs'], vocab_size=synth_model.model.module.config.vocab_size, d_model=synth_model.model.module.config.emb_dim, dropout_rate=synth_model.model.module.config.dropout_rate, num_layers=synth_model.model.module.config.num_encoder_layers, num_heads=synth_model.model.module.config.num_heads, d_kv=synth_model.model.module.config.head_dim, d_ff=synth_model.model.module.config.mlp_dim, feed_forward_proj='gated-gelu', )
    continuous_encoder = SpectrogramContEncoder(
        input_dims=synth_model.audio_codec.n_dims, targets_context_length=synth_model.sequence_length['targets_context'], d_model=synth_model.model.module.config.emb_dim, dropout_rate=synth_model.model.module.config.dropout_rate, num_layers=synth_model.model.module.config.num_encoder_layers, num_heads=synth_model.model.module.config.num_heads, d_kv=synth_model.model.module.config.head_dim, d_ff=synth_model.model.module.config.mlp_dim, feed_forward_proj='gated-gelu', )
    decoder = T5FilmDecoder(
        input_dims=synth_model.audio_codec.n_dims, targets_length=synth_model.sequence_length['targets_context'], max_decoder_noise_time=synth_model.model.module.config.max_decoder_noise_time, d_model=synth_model.model.module.config.emb_dim, num_layers=synth_model.model.module.config.num_decoder_layers, num_heads=synth_model.model.module.config.num_heads, d_kv=synth_model.model.module.config.head_dim, d_ff=synth_model.model.module.config.mlp_dim, dropout_rate=synth_model.model.module.config.dropout_rate, )
    notes_encoder = load_notes_encoder(t5_checkpoint['target']['token_encoder'], notes_encoder)
    continuous_encoder = load_continuous_encoder(t5_checkpoint['target']['continuous_encoder'], continuous_encoder)
    decoder = load_decoder(t5_checkpoint['target']['decoder'], decoder)
    melgan = OnnxRuntimeModel.from_pretrained('kashif/soundstream_mel_decoder')
    pipe = SpectrogramDiffusionPipeline(
        notes_encoder=notes_encoder, continuous_encoder=continuous_encoder, decoder=decoder, scheduler=scheduler, melgan=melgan, )
    if args.save:
        pipe.save_pretrained(args.output_path)
if __name__ == "__main__":
_a : str= argparse.ArgumentParser()
parser.add_argument("--output_path", default=None, type=str, required=True, help="Path to the converted model.")
parser.add_argument(
"--save", default=True, type=bool, required=False, help="Whether to save the converted model or not."
)
parser.add_argument(
"--checkpoint_path",
default=f'''{MODEL}/checkpoint_500000''',
type=str,
required=False,
help="Path to the original jax model checkpoint.",
)
_a : str= parser.parse_args()
main(args)
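
# --- Illustrative invocation (not part of the original script) ---
# The script file name below is an assumption; only --output_path is required:
#
#   python convert_music_spectrogram_to_diffusers.py \
#       --checkpoint_path base_with_context/checkpoint_500000 \
#       --output_path ./spectrogram_diffusion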
"""simple docstring"""
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class BaseTransformersCLICommand(ABC):
    @staticmethod
    @abstractmethod
    def register_subcommand(parser: ArgumentParser):
        raise NotImplementedError()

    @abstractmethod
    def run(self):
        raise NotImplementedError()
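

# --- Illustrative subclass (not part of the original module) ---
# A minimal sketch of how a concrete command plugs into the interface above;
# the command name and behaviour are invented for illustration:
#
#   class EnvCommand(BaseTransformersCLICommand):
#       @staticmethod
#       def register_subcommand(parser: ArgumentParser):
#           env_parser = parser.add_parser("env", help="Print environment info.")
#           env_parser.set_defaults(func=lambda args: EnvCommand())
#
#       def run(self):
#           print("environment info goes here")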
'''simple docstring'''
from collections import OrderedDict
from typing import List, Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
a : Dict = logging.get_logger(__name__)
a : Union[str, Any] = {
'''google/efficientnet-b7''': '''https://huggingface.co/google/efficientnet-b7/resolve/main/config.json''',
}
class EfficientNetConfig(PretrainedConfig):
    model_type = "efficientnet"

    def __init__(self, num_channels: int = 3, image_size: int = 600, width_coefficient: float = 2.0, depth_coefficient: float = 3.1, depth_divisor: int = 8, kernel_sizes: List[int] = [3, 3, 5, 3, 5, 5, 3], in_channels: List[int] = [32, 16, 24, 40, 80, 112, 192], out_channels: List[int] = [16, 24, 40, 80, 112, 192, 320], depthwise_padding: List[int] = [], strides: List[int] = [1, 2, 2, 2, 1, 2, 1], num_block_repeats: List[int] = [1, 2, 2, 3, 3, 4, 1], expand_ratios: List[int] = [1, 6, 6, 6, 6, 6, 6], squeeze_expansion_ratio: float = 0.25, hidden_act: str = "swish", hidden_dim: int = 2_560, pooling_type: str = "mean", initializer_range: float = 0.02, batch_norm_eps: float = 0.001, batch_norm_momentum: float = 0.99, dropout_rate: float = 0.5, drop_connect_rate: float = 0.2, **kwargs, ):
        super().__init__(**kwargs)

        self.num_channels = num_channels
        self.image_size = image_size
        self.width_coefficient = width_coefficient
        self.depth_coefficient = depth_coefficient
        self.depth_divisor = depth_divisor
        self.kernel_sizes = kernel_sizes
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.depthwise_padding = depthwise_padding
        self.strides = strides
        self.num_block_repeats = num_block_repeats
        self.expand_ratios = expand_ratios
        self.squeeze_expansion_ratio = squeeze_expansion_ratio
        self.hidden_act = hidden_act
        self.hidden_dim = hidden_dim
        self.pooling_type = pooling_type
        self.initializer_range = initializer_range
        self.batch_norm_eps = batch_norm_eps
        self.batch_norm_momentum = batch_norm_momentum
        self.dropout_rate = dropout_rate
        self.drop_connect_rate = drop_connect_rate
        self.num_hidden_layers = sum(num_block_repeats) * 4
class EfficientNetOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-5
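

# --- Illustrative usage (not part of the original module) ---
# A minimal sketch: build a default config and inspect the derived depth;
# the class names follow the definitions above.
#
#   config = EfficientNetConfig()
#   print(config.model_type)          # "efficientnet"
#   print(config.num_hidden_layers)   # sum([1, 2, 2, 3, 3, 4, 1]) * 4 == 64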
from __future__ import annotations
from statistics import mean
def calculate_waitingtime(arrival_time: list[int], burst_time: list[int], no_of_processes: int) -> list[int]:
    waiting_time = [0] * no_of_processes
    remaining_time = [0] * no_of_processes
    # Initialize remaining_time to burst_time.
    for i in range(no_of_processes):
        remaining_time[i] = burst_time[i]

    ready_process: list[int] = []
    completed = 0
    total_time = 0
    # When processes are not completed,
    # A process whose arrival time has passed \
    # and has remaining execution time is put into the ready_process.
    # The shortest process in the ready_process, target_process is executed.
    while completed != no_of_processes:
        ready_process = []
        target_process = -1

        for i in range(no_of_processes):
            if (arrival_time[i] <= total_time) and (remaining_time[i] > 0):
                ready_process.append(i)

        if len(ready_process) > 0:
            target_process = ready_process[0]
            for i in ready_process:
                if remaining_time[i] < remaining_time[target_process]:
                    target_process = i
            total_time += burst_time[target_process]
            completed += 1
            remaining_time[target_process] = 0
            waiting_time[target_process] = (
                total_time - arrival_time[target_process] - burst_time[target_process]
            )
        else:
            total_time += 1
    return waiting_time


def calculate_turnaroundtime(burst_time: list[int], no_of_processes: int, waiting_time: list[int]) -> list[int]:
    turn_around_time = [0] * no_of_processes
    for i in range(no_of_processes):
        turn_around_time[i] = burst_time[i] + waiting_time[i]
    return turn_around_time
if __name__ == "__main__":
print("""[TEST CASE 01]""")
    no_of_processes = 4
    burst_time = [2, 5, 3, 7]
    arrival_time = [0, 0, 0, 0]
    waiting_time = calculate_waitingtime(arrival_time, burst_time, no_of_processes)
    turn_around_time = calculate_turnaroundtime(
        burst_time, no_of_processes, waiting_time
    )
# Printing the Result
print("""PID\tBurst Time\tArrival Time\tWaiting Time\tTurnaround Time""")
for i, process_id in enumerate(list(range(1, 5))):
print(
F"""{process_id}\t{burst_time[i]}\t\t\t{arrival_time[i]}\t\t\t\t"""
F"""{waiting_time[i]}\t\t\t\t{turn_around_time[i]}"""
)
print(F"""\nAverage waiting time = {mean(waiting_time):.5f}""")
print(F"""Average turnaround time = {mean(turn_around_time):.5f}""")
"""simple docstring"""
import argparse
import re
import requests
import torch
# git clone https://github.com/salesforce/BLIP.git
from models.blip import blip_decoder
from models.blip_itm import blip_itm
from models.blip_vqa import blip_vqa
from PIL import Image
from torchvision import transforms
from torchvision.transforms.functional import InterpolationMode
from transformers import (
BertTokenizer,
BlipConfig,
BlipForConditionalGeneration,
BlipForImageTextRetrieval,
BlipForQuestionAnswering,
)
def load_demo_image(image_size, device):
    img_url = "https://storage.googleapis.com/sfr-vision-language-research/BLIP/demo.jpg"
    raw_image = Image.open(requests.get(img_url, stream=True).raw).convert("RGB")

    transform = transforms.Compose(
        [
            transforms.Resize((image_size, image_size), interpolation=InterpolationMode.BICUBIC),
            transforms.ToTensor(),
            transforms.Normalize((0.48145466, 0.4578275, 0.40821073), (0.26862954, 0.26130258, 0.27577711)),
        ]
    )
    image = transform(raw_image).unsqueeze(0).to(device)
    return image
def rename_key(key):
    if "visual_encoder" in key:
        key = re.sub("visual_encoder*", "vision_model.encoder", key)
    if "blocks" in key:
        key = re.sub(r"blocks", "layers", key)
    if "attn" in key:
        key = re.sub(r"attn", "self_attn", key)
    if "norm1" in key:
        key = re.sub(r"norm1", "layer_norm1", key)
    if "norm2" in key:
        key = re.sub(r"norm2", "layer_norm2", key)
    if "encoder.norm" in key:
        key = re.sub(r"encoder.norm", "post_layernorm", key)
    if "encoder.patch_embed.proj" in key:
        key = re.sub(r"encoder.patch_embed.proj", "embeddings.patch_embedding", key)
    if "encoder.pos_embed" in key:
        key = re.sub(r"encoder.pos_embed", "embeddings.position_embedding", key)
    if "encoder.cls_token" in key:
        key = re.sub(r"encoder.cls_token", "embeddings.class_embedding", key)
    if "self_attn" in key:
        key = re.sub(r"self_attn.proj", "self_attn.projection", key)
    return key
@torch.no_grad()
def convert_blip_checkpoint(pytorch_dump_folder_path, config_path=None):
    """
    Copy/paste/tweak model's weights to transformers design.
    """
    if config_path is not None:
        config = BlipConfig.from_pretrained(config_path)
    else:
        config = BlipConfig(projection_dim=512, text_config={}, vision_config={})

    hf_model = BlipForConditionalGeneration(config).eval()

    model_url = "https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_capfilt_large.pth"

    pt_model = blip_decoder(pretrained=model_url, image_size=384, vit="base")
    pt_model = pt_model.eval()

    modified_state_dict = pt_model.state_dict()
    for key in modified_state_dict.copy():
        value = modified_state_dict.pop(key)
        renamed_key = rename_key(key)
        modified_state_dict[renamed_key] = value

    hf_model.load_state_dict(modified_state_dict)

    image_size = 384
    image = load_demo_image(image_size=image_size, device="cpu")
    tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
    input_ids = tokenizer(["a picture of"]).input_ids

    out = hf_model.generate(image, input_ids)
    assert out[0].tolist() == [30522, 1037, 3861, 1997, 1037, 2450, 3564, 2006, 1996, 3509, 2007, 2014, 3899, 102]

    out = hf_model.generate(image)
    assert out[0].tolist() == [30522, 1037, 2450, 3564, 2006, 1996, 3509, 2007, 2014, 3899, 102]

    if pytorch_dump_folder_path is not None:
        hf_model.save_pretrained(pytorch_dump_folder_path)

    # model_url = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_vqa.pth'
    model_url = (
        "https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_vqa_capfilt_large.pth"
    )

    vqa_model = blip_vqa(pretrained=model_url, image_size=image_size, vit="base")
    vqa_model.eval()

    modified_state_dict = vqa_model.state_dict()
    for key in modified_state_dict.copy():
        value = modified_state_dict.pop(key)
        renamed_key = rename_key(key)
        modified_state_dict[renamed_key] = value

    hf_vqa_model = BlipForQuestionAnswering(config)
    hf_vqa_model.load_state_dict(modified_state_dict)

    question = ["How many dogs are in this image?"]
    question_input_ids = tokenizer(question, return_tensors="pt").input_ids

    answer = hf_vqa_model.generate(question_input_ids, image)
    print(tokenizer.decode(answer[0]))
    assert tokenizer.decode(answer[0]) == "[UNK] 1 [SEP]"
    if pytorch_dump_folder_path is not None:
        hf_vqa_model.save_pretrained(pytorch_dump_folder_path + "_vqa")

    model_url = "https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_retrieval_coco.pth"

    itm_model = blip_itm(pretrained=model_url, image_size=image_size, vit="base")
    itm_model.eval()

    modified_state_dict = itm_model.state_dict()
    for key in modified_state_dict.copy():
        value = modified_state_dict.pop(key)
        renamed_key = rename_key(key)
        modified_state_dict[renamed_key] = value

    hf_itm_model = BlipForImageTextRetrieval(config)

    question = ["A picture of a woman with a dog sitting in a beach"]
    question_input_ids = tokenizer(
        question, return_tensors="pt", padding="max_length", truncation=True, max_length=35, ).input_ids

    hf_itm_model.load_state_dict(modified_state_dict)
    hf_itm_model.eval()

    out_itm = hf_itm_model(question_input_ids, image, use_itm_head=True)
    out = hf_itm_model(question_input_ids, image, use_itm_head=False)

    assert out[0].item() == 0.2110687494277954
    assert torch.nn.functional.softmax(out_itm[0], dim=1)[:, 1].item() == 0.45698845386505127

    if pytorch_dump_folder_path is not None:
        hf_itm_model.save_pretrained(pytorch_dump_folder_path + "_itm")
if __name__ == "__main__":
lowercase = argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
lowercase = parser.parse_args()
convert_blip_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
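
# --- Illustrative invocation (not part of the original script) ---
# The script file name below is an assumption; both arguments are optional:
#
#   python convert_blip_original_pytorch_to_hf.py \
#       --pytorch_dump_folder_path ./blip-base \
#       --config_path ./config.json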
"""simple docstring"""
from __future__ import annotations
import copy
import inspect
import unittest
import numpy as np
from transformers import is_tf_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_tf, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
LayoutLMvaConfig,
TFLayoutLMvaForQuestionAnswering,
TFLayoutLMvaForSequenceClassification,
TFLayoutLMvaForTokenClassification,
TFLayoutLMvaModel,
)
if is_vision_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class TFLayoutLMvaModelTester:
    def __init__(self, parent, batch_size=2, num_channels=3, image_size=4, patch_size=2, text_seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=36, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, coordinate_size=6, shape_size=6, num_labels=3, num_choices=4, scope=None, range_bbox=1000, ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.patch_size = patch_size
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.coordinate_size = coordinate_size
        self.shape_size = shape_size
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.range_bbox = range_bbox
        # LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
        self.text_seq_length = text_seq_length
        self.image_seq_length = (image_size // patch_size) ** 2 + 1
        self.seq_length = self.text_seq_length + self.image_seq_length

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.text_seq_length], self.vocab_size)

        bbox = ids_tensor([self.batch_size, self.text_seq_length, 4], self.range_bbox)
        bbox = bbox.numpy()
        # Ensure that bbox is legal
        for i in range(bbox.shape[0]):
            for j in range(bbox.shape[1]):
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    tmp_coordinate = bbox[i, j, 3]
                    bbox[i, j, 3] = bbox[i, j, 1]
                    bbox[i, j, 1] = tmp_coordinate
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    tmp_coordinate = bbox[i, j, 2]
                    bbox[i, j, 2] = bbox[i, j, 0]
                    bbox[i, j, 0] = tmp_coordinate
        bbox = tf.constant(bbox)

        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.text_seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.text_seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.text_seq_length], self.num_labels)

        config = LayoutLMvaConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, coordinate_size=self.coordinate_size, shape_size=self.shape_size, input_size=self.image_size, patch_size=self.patch_size, )

        return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
    def create_and_check_model(self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask):
        model = TFLayoutLMvaModel(config=config)

        # text + image
        result = model(input_ids, pixel_values=pixel_values, training=False)
        result = model(
            input_ids, bbox=bbox, pixel_values=pixel_values, attention_mask=input_mask, token_type_ids=token_type_ids, training=False, )
        result = model(input_ids, bbox=bbox, pixel_values=pixel_values, training=False)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

        # text only
        result = model(input_ids, training=False)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.text_seq_length, self.hidden_size))

        # image only
        result = model({"pixel_values": pixel_values}, training=False)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.image_seq_length, self.hidden_size))

    def create_and_check_for_sequence_classification(self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels):
        config.num_labels = self.num_labels
        model = TFLayoutLMvaForSequenceClassification(config=config)
        result = model(
            input_ids, bbox=bbox, pixel_values=pixel_values, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels, training=False, )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, token_labels):
        config.num_labels = self.num_labels
        model = TFLayoutLMvaForTokenClassification(config=config)
        result = model(
            input_ids, bbox=bbox, pixel_values=pixel_values, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels, training=False, )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.text_seq_length, self.num_labels))

    def create_and_check_for_question_answering(self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels):
        config.num_labels = 2
        model = TFLayoutLMvaForQuestionAnswering(config=config)
        result = model(
            input_ids, bbox=bbox, pixel_values=pixel_values, attention_mask=input_mask, token_type_ids=token_type_ids, start_positions=sequence_labels, end_positions=sequence_labels, training=False, )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "bbox": bbox,
            "pixel_values": pixel_values,
            "token_type_ids": token_type_ids,
            "attention_mask": input_mask,
        }
        return config, inputs_dict
@require_tf
class TFLayoutLMvaModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFLayoutLMvaModel,
            TFLayoutLMvaForQuestionAnswering,
            TFLayoutLMvaForSequenceClassification,
            TFLayoutLMvaForTokenClassification,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {"document-question-answering": TFLayoutLMvaForQuestionAnswering, "feature-extraction": TFLayoutLMvaModel}
        if is_tf_available()
        else {}
    )
    test_pruning = False
    test_resize_embeddings = False
    test_onnx = False

    def is_pipeline_test_to_skip(self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name):
        return True

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False) -> dict:
        inputs_dict = copy.deepcopy(inputs_dict)

        if model_class in get_values(TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING):
            inputs_dict = {
                k: tf.tile(tf.expand_dims(v, 1), (1, self.model_tester.num_choices) + (1,) * (v.ndim - 1))
                if isinstance(v, tf.Tensor) and v.ndim > 0
                else v
                for k, v in inputs_dict.items()
            }

        if return_labels:
            if model_class in get_values(TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING):
                inputs_dict["labels"] = tf.ones(self.model_tester.batch_size, dtype=tf.int32)
            elif model_class in get_values(TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING):
                inputs_dict["start_positions"] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32)
                inputs_dict["end_positions"] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32)
            elif model_class in get_values(TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING):
                inputs_dict["labels"] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32)
            elif model_class in get_values(TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING):
                inputs_dict["labels"] = tf.zeros(
                    (self.model_tester.batch_size, self.model_tester.text_seq_length), dtype=tf.int32
                )

        return inputs_dict

    def setUp(self):
        self.model_tester = TFLayoutLMvaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LayoutLMvaConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()
    def test_loss_computation(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            if getattr(model, "hf_compute_loss", None):
                # The number of elements in the loss should be the same as the number of elements in the label
                prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True)
                added_label = prepared_for_class[
                    sorted(prepared_for_class.keys() - inputs_dict.keys(), reverse=True)[0]
                ]
                expected_loss_size = added_label.shape.as_list()[:1]

                # Test that model correctly compute the loss with kwargs
                prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True)
                input_ids = prepared_for_class.pop("input_ids")

                loss = model(input_ids, **prepared_for_class)[0]
                self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1])

                # Test that model correctly compute the loss when we mask some positions
                prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True)
                input_ids = prepared_for_class.pop("input_ids")
                if "labels" in prepared_for_class:
                    labels = prepared_for_class["labels"].numpy()
                    if len(labels.shape) > 1 and labels.shape[1] != 1:
                        labels[0] = -100
                        prepared_for_class["labels"] = tf.convert_to_tensor(labels)
                        loss = model(input_ids, **prepared_for_class)[0]
                        self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1])
                        self.assertTrue(not np.any(np.isnan(loss.numpy())))

                # Test that model correctly compute the loss with a dict
                prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True)
                loss = model(prepared_for_class)[0]
                self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1])

                # Test that model correctly compute the loss with a tuple
                prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True)

                # Get keys that were added with the _prepare_for_class function
                label_keys = prepared_for_class.keys() - inputs_dict.keys()
                signature = inspect.signature(model.call).parameters
                signature_names = list(signature.keys())

                # Create a dictionary holding the location of the tensors in the tuple
                tuple_index_mapping = {0: "input_ids"}
                for label_key in label_keys:
                    label_key_index = signature_names.index(label_key)
                    tuple_index_mapping[label_key_index] = label_key
                sorted_tuple_index_mapping = sorted(tuple_index_mapping.items())
                # Initialize a list with their default values, update the values and convert to a tuple
                list_input = []
                for name in signature_names:
                    if name != "kwargs":
                        list_input.append(signature[name].default)
                for index, value in sorted_tuple_index_mapping:
                    list_input[index] = prepared_for_class[value]
                tuple_input = tuple(list_input)
                # Send to model
                loss = model(tuple_input[:-1])[0]
                self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1])
    def test_model(self):
        config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(config, input_ids, bbox, pixel_values, token_type_ids, input_mask)

    def test_model_various_embeddings(self):
        config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config.position_embedding_type = type
            self.model_tester.create_and_check_model(config, input_ids, bbox, pixel_values, token_type_ids, input_mask)

    def test_for_sequence_classification(self):
        config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(
            config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels)

    def test_for_token_classification(self):
        config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(
            config, input_ids, bbox, pixel_values, token_type_ids, input_mask, token_labels)

    def test_for_question_answering(self):
        config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(
            config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels)
    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFLayoutLMvaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_tf
class TFLayoutLMvaModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return LayoutLMvaImageProcessor(apply_ocr=False) if is_vision_available() else None

    @slow
    def test_inference_no_head(self):
        model = TFLayoutLMvaModel.from_pretrained("microsoft/layoutlmv3-base")

        image_processor = self.default_image_processor
        image = prepare_img()
        pixel_values = image_processor(images=image, return_tensors="tf").pixel_values

        input_ids = tf.constant([[1, 2]])
        bbox = tf.expand_dims(tf.constant([[1, 2, 3, 4], [5, 6, 7, 8]]), axis=0)

        # forward pass
        outputs = model(input_ids=input_ids, bbox=bbox, pixel_values=pixel_values, training=False)

        # verify the logits
        expected_shape = (1, 199, 768)
        self.assertEqual(outputs.last_hidden_state.shape, expected_shape)

        expected_slice = tf.constant(
            [[-0.0529, 0.3618, 0.1632], [-0.1587, -0.1667, -0.0400], [-0.1557, -0.1671, -0.0505]])
        self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4))
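

# --- Illustrative invocation (not part of the original test file) ---
# The test module path below is an assumption about the repository layout:
#
#   pytest tests/models/layoutlmv3/test_modeling_tf_layoutlmv3.py -k "test_model" -q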
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    'configuration_whisper': ['WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'WhisperConfig', 'WhisperOnnxConfig'],
    'feature_extraction_whisper': ['WhisperFeatureExtractor'],
    'processing_whisper': ['WhisperProcessor'],
    'tokenization_whisper': ['WhisperTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_whisper_fast'] = ['WhisperTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_whisper'] = [
        'WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST',
        'WhisperForConditionalGeneration',
        'WhisperModel',
        'WhisperPreTrainedModel',
        'WhisperForAudioClassification',
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_whisper'] = [
        'TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST',
        'TFWhisperForConditionalGeneration',
        'TFWhisperModel',
        'TFWhisperPreTrainedModel',
    ]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_flax_whisper'] = [
        'FlaxWhisperForConditionalGeneration',
        'FlaxWhisperModel',
        'FlaxWhisperPreTrainedModel',
        'FlaxWhisperForAudioClassification',
    ]
if TYPE_CHECKING:
from .configuration_whisper import WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP, WhisperConfig, WhisperOnnxConfig
from .feature_extraction_whisper import WhisperFeatureExtractor
from .processing_whisper import WhisperProcessor
from .tokenization_whisper import WhisperTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_whisper_fast import WhisperTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_whisper import (
WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST,
WhisperForAudioClassification,
WhisperForConditionalGeneration,
WhisperModel,
WhisperPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_whisper import (
TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFWhisperForConditionalGeneration,
TFWhisperModel,
TFWhisperPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_whisper import (
FlaxWhisperForAudioClassification,
FlaxWhisperForConditionalGeneration,
FlaxWhisperModel,
FlaxWhisperPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
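
# --- Illustrative note (not part of the original module) ---
# Each optional backend (tokenizers, torch, tf, flax) only extends
# `_import_structure` when its dependency check passes, so a consumer pays the
# import cost for exactly the backends it has installed. A rough sketch of the
# consumer side (package path assumed):
#
#   from transformers.models.whisper import WhisperConfig  # resolved lazily at runtime
#   config = WhisperConfig()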
def sum_of_series(first_term: int, common_diff: int, num_of_terms: int) -> float:
    """
    Sum of an arithmetic progression: n/2 * (2a + (n - 1)d).

    >>> sum_of_series(1, 1, 10)
    55.0
    >>> sum_of_series(1, 10, 100)
    49600.0
    """
    total = (num_of_terms / 2) * (2 * first_term + (num_of_terms - 1) * common_diff)
    # formula for sum of series
    return total


def main() -> None:
    print(sum_of_series(1, 1, 10))
if __name__ == "__main__":
import doctest
doctest.testmod()
'''simple docstring'''
red = 0  # The first color of the flag.
white = 1  # The second color of the flag.
blue = 2  # The third color of the flag.
colors = (red, white, blue)


def dutch_national_flag_sort(sequence: list) -> list:
    """
    Sort a sequence of 0s, 1s and 2s in place (Dutch national flag problem).

    >>> dutch_national_flag_sort([2, 1, 0, 0, 1, 2])
    [0, 0, 1, 1, 2, 2]
    """
    if not sequence:
        return []
    if len(sequence) == 1:
        return list(sequence)
    low = 0
    high = len(sequence) - 1
    mid = 0
    while mid <= high:
        if sequence[mid] == colors[0]:
            sequence[low], sequence[mid] = sequence[mid], sequence[low]
            low += 1
            mid += 1
        elif sequence[mid] == colors[1]:
            mid += 1
        elif sequence[mid] == colors[2]:
            sequence[mid], sequence[high] = sequence[high], sequence[mid]
            high -= 1
        else:
            msg = f"""The elements inside the sequence must contains only {colors} values"""
            raise ValueError(msg)
    return sequence
if __name__ == "__main__":
import doctest
doctest.testmod()
    user_input = input("Enter numbers separated by commas:\n").strip()
    unsorted = [int(item.strip()) for item in user_input.split(",")]
print(f'{dutch_national_flag_sort(unsorted)}')
'''simple docstring'''
import os
from argparse import ArgumentParser, Namespace
from ..data import SingleSentenceClassificationProcessor as Processor
from ..pipelines import TextClassificationPipeline
from ..utils import is_tf_available, is_torch_available, logging
from . import BaseTransformersCLICommand
if not is_tf_available() and not is_torch_available():
raise RuntimeError("At least one of PyTorch or TensorFlow 2.0+ should be installed to use CLI training")
# TF training parameters
lowerCAmelCase_ = False
lowerCAmelCase_ = False
def train_command_factory(args: Namespace):
    return TrainCommand(args)
class TrainCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser):
        train_parser = parser.add_parser("train", help="CLI tool to train a model on a task.")
        train_parser.add_argument(
            "--train_data", type=str, required=True, help="path to train (and optionally evaluation) dataset as a csv with tab separated labels and sentences.", )
        train_parser.add_argument(
            "--column_label", type=int, default=0, help="Column of the dataset csv file with example labels.")
        train_parser.add_argument(
            "--column_text", type=int, default=1, help="Column of the dataset csv file with example texts.")
        train_parser.add_argument(
            "--column_id", type=int, default=2, help="Column of the dataset csv file with example ids.")
        train_parser.add_argument(
            "--skip_first_row", action="store_true", help="Skip the first row of the csv file (headers).")
        train_parser.add_argument("--validation_data", type=str, default="", help="path to validation dataset.")
        train_parser.add_argument(
            "--validation_split", type=float, default=0.1, help="if validation dataset is not provided, fraction of train dataset to use as validation dataset.", )
        train_parser.add_argument("--output", type=str, default="./", help="path to saved the trained model.")
        train_parser.add_argument(
            "--task", type=str, default="text_classification", help="Task to train the model on.")
        train_parser.add_argument(
            "--model", type=str, default="bert-base-uncased", help="Model's name or path to stored model.")
        train_parser.add_argument("--train_batch_size", type=int, default=32, help="Batch size for training.")
        train_parser.add_argument("--valid_batch_size", type=int, default=64, help="Batch size for validation.")
        train_parser.add_argument("--learning_rate", type=float, default=3e-5, help="Learning rate.")
        train_parser.add_argument("--adam_epsilon", type=float, default=1e-08, help="Epsilon for Adam optimizer.")
        train_parser.set_defaults(func=train_command_factory)
    def __init__(self, args):
        self.logger = logging.get_logger("transformers-cli/training")
        self.framework = "tf" if is_tf_available() else "torch"

        os.makedirs(args.output, exist_ok=True)
        self.output = args.output

        self.column_label = args.column_label
        self.column_text = args.column_text
        self.column_id = args.column_id

        self.logger.info(f"""Loading {args.task} pipeline for {args.model}""")
        if args.task == "text_classification":
            self.pipeline = TextClassificationPipeline.from_pretrained(args.model)
        elif args.task == "token_classification":
            raise NotImplementedError
        elif args.task == "question_answering":
            raise NotImplementedError

        self.logger.info(f"""Loading dataset from {args.train_data}""")
        self.train_dataset = Processor.create_from_csv(
            args.train_data, column_label=args.column_label, column_text=args.column_text, column_id=args.column_id, skip_first_row=args.skip_first_row, )
        self.valid_dataset = None
        if args.validation_data:
            self.logger.info(f"""Loading validation dataset from {args.validation_data}""")
            self.valid_dataset = Processor.create_from_csv(
                args.validation_data, column_label=args.column_label, column_text=args.column_text, column_id=args.column_id, skip_first_row=args.skip_first_row, )

        self.validation_split = args.validation_split
        self.train_batch_size = args.train_batch_size
        self.valid_batch_size = args.valid_batch_size
        self.learning_rate = args.learning_rate
        self.adam_epsilon = args.adam_epsilon

    def run(self):
        if self.framework == "tf":
            return self.run_tf()
        return self.run_torch()

    def run_torch(self):
        raise NotImplementedError

    def run_tf(self):
        self.pipeline.fit(
            self.train_dataset, validation_data=self.valid_dataset, validation_split=self.validation_split, learning_rate=self.learning_rate, adam_epsilon=self.adam_epsilon, train_batch_size=self.train_batch_size, valid_batch_size=self.valid_batch_size, )

        # Save trained pipeline
        self.pipeline.save_pretrained(self.output)
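
# --- Illustrative invocation (not part of the original module) ---
# Once wired into the CLI via register_subcommand, the subcommand above would
# be driven roughly like this (the CLI entry point name is an assumption):
#
#   transformers-cli train --train_data ./train.csv --model bert-base-uncased --output ./out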
from __future__ import annotations
from functools import lru_cache
from math import ceil
NUM_PRIMES = 100

primes = set(range(3, NUM_PRIMES, 2))
primes.add(2)
prime: int
for prime in range(3, ceil(NUM_PRIMES**0.5), 2):
    if prime not in primes:
        continue
    primes.difference_update(set(range(prime * prime, NUM_PRIMES, prime)))


@lru_cache(maxsize=100)
def partition(number_to_partition: int) -> set[int]:
    if number_to_partition < 0:
        return set()
    elif number_to_partition == 0:
        return {1}

    ret: set[int] = set()

    for prime in primes:
        if prime > number_to_partition:
            continue
        for sub in partition(number_to_partition - prime):
            ret.add(sub * prime)

    return ret


def solution(number_unique_partitions: int = 5000) -> int | None:
    for number_to_partition in range(1, NUM_PRIMES):
        if len(partition(number_to_partition)) > number_unique_partitions:
            return number_to_partition
    return None
if __name__ == "__main__":
print(F'''{solution() = }''')
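
# --- Illustrative check (not part of the original file) ---
# 10 is the first value with more than four prime partitions
# (2+2+2+2+2, 2+2+3+3, 2+3+5, 3+7, 5+5), so:
#
#   assert solution(4) == 10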
import numpy as np
def power_iteration(
    input_matrix: np.ndarray, vector: np.ndarray, error_tol: float = 1e-12, max_iterations: int = 100, ) -> tuple[float, np.ndarray]:
    assert np.shape(input_matrix)[0] == np.shape(input_matrix)[1]
    # Ensure proper dimensionality.
    assert np.shape(input_matrix)[0] == np.shape(vector)[0]
    # Ensure inputs are either both complex or both real
    assert np.iscomplexobj(input_matrix) == np.iscomplexobj(vector)
    is_complex = np.iscomplexobj(input_matrix)
    if is_complex:
        # Ensure complex input_matrix is Hermitian
        assert np.array_equal(input_matrix, input_matrix.conj().T)

    # Set convergence to False. Will define convergence when we exceed max_iterations
    # or when we have small changes from one iteration to next.
    convergence = False
    lambda_previous = 0
    iterations = 0
    error = 1e12

    while not convergence:
        # Multiple matrix by the vector.
        w = np.dot(input_matrix, vector)
        # Normalize the resulting output vector.
        vector = w / np.linalg.norm(w)
        # Find rayleigh quotient
        # (faster than usual b/c we know vector is normalized already)
        vector_h = vector.conj().T if is_complex else vector.T
        lambda_ = np.dot(vector_h, np.dot(input_matrix, vector))

        # Check convergence.
        error = np.abs(lambda_ - lambda_previous) / lambda_
        iterations += 1

        if error <= error_tol or iterations >= max_iterations:
            convergence = True

        lambda_previous = lambda_

    if is_complex:
        lambda_ = np.real(lambda_)

    return lambda_, vector
def test_power_iteration() -> None:
    real_input_matrix = np.array([[41, 4, 20], [4, 26, 30], [20, 30, 50]])
    real_vector = np.array([41, 4, 20])
    complex_input_matrix = real_input_matrix.astype(np.complex128)
    imag_matrix = np.triu(1j * complex_input_matrix, 1)
    complex_input_matrix += imag_matrix
    complex_input_matrix += -1 * imag_matrix.T
    complex_vector = np.array([41, 4, 20]).astype(np.complex128)

    for problem_type in ["real", "complex"]:
        if problem_type == "real":
            input_matrix = real_input_matrix
            vector = real_vector
        elif problem_type == "complex":
            input_matrix = complex_input_matrix
            vector = complex_vector

        # Our implementation.
        eigen_value, eigen_vector = power_iteration(input_matrix, vector)

        # Numpy implementation.
        # Get eigenvalues and eigenvectors using built-in numpy
        # eigh (eigh used for symmetric or hermetian matrices).
        eigen_values, eigen_vectors = np.linalg.eigh(input_matrix)
        # Last eigenvalue is the maximum one.
        eigen_value_max = eigen_values[-1]
        # Last column in this matrix is eigenvector corresponding to largest eigenvalue.
        eigen_vector_max = eigen_vectors[:, -1]

        # Check our implementation and numpy gives close answers.
        assert np.abs(eigen_value - eigen_value_max) <= 1e-6
        # Take absolute values element wise of each eigenvector.
        # as they are only unique to a minus sign.
        assert np.linalg.norm(np.abs(eigen_vector) - np.abs(eigen_vector_max)) <= 1e-6
if __name__ == "__main__":
import doctest
doctest.testmod()
test_power_iteration()
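    # Minimal usage sketch (added for illustration): the dominant eigenvalue of
    # diag(2, 1) is 2, and power iteration recovers it from a generic start vector.
    demo_value, _ = power_iteration(np.array([[2.0, 0.0], [0.0, 1.0]]), np.array([1.0, 1.0]))
    print(f"dominant eigenvalue ~ {demo_value:.6f}")  # -> ~2.0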
| 148
| 1
|
"""simple docstring"""
from __future__ import annotations
def peak(lst: list[int]) -> int:
    """
    Return the peak value of `lst` (an element larger than both neighbours)
    using divide and conquer, discarding half the list per recursive call.
    """
    m = len(lst) // 2
    # choose the middle 3 elements
    three = lst[m - 1 : m + 2]
    # if middle element is peak
    if three[1] > three[0] and three[1] > three[2]:
        return three[1]
    # if increasing, recurse on right
    elif three[0] < three[2]:
        if len(lst[:m]) == 2:
            m -= 1
        return peak(lst[m:])
    # decreasing
    else:
        if len(lst[:m]) == 2:
            m += 1
        return peak(lst[:m])
if __name__ == "__main__":
import doctest
doctest.testmod()
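    # Quick illustrative check (added example): the list rises then falls, so the
    # peak is 5.
    assert peak([1, 2, 3, 4, 5, 4, 3, 2, 1]) == 5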
| 715
|
"""simple docstring"""
import argparse
import json
import os
import re
import shutil
import torch
from transformers import BioGptConfig, BioGptForCausalLM
from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES
from transformers.tokenization_utils_base import TOKENIZER_CONFIG_FILE
from transformers.utils import WEIGHTS_NAME, logging
logging.set_verbosity_warning()
json_indent = 2
class Dictionary:
    """A mapping from symbols to consecutive integers"""

    def __init__(
        self,
        *,  # begin keyword-only arguments
        bos="<s>",
        pad="<pad>",
        eos="</s>",
        unk="<unk>",
        extra_special_symbols=None,
    ):
        self.bos_word, self.unk_word, self.pad_word, self.eos_word = bos, unk, pad, eos
        self.symbols = []
        self.count = []
        self.indices = {}
        self.bos_index = self.add_symbol(bos)
        self.pad_index = self.add_symbol(pad)
        self.eos_index = self.add_symbol(eos)
        self.unk_index = self.add_symbol(unk)
        if extra_special_symbols:
            for s in extra_special_symbols:
                self.add_symbol(s)
        self.nspecial = len(self.symbols)

    def __eq__(self, other):
        return self.indices == other.indices

    def __getitem__(self, idx):
        if idx < len(self.symbols):
            return self.symbols[idx]
        return self.unk_word

    def __len__(self):
        return len(self.symbols)

    def __contains__(self, sym):
        return sym in self.indices

    @classmethod
    def load(cls, f):
        """Loads the dictionary from a text file of `<symbol> <count>` lines."""
        d = cls()
        d.add_from_file(f)
        return d

    def add_symbol(self, word, n=1, overwrite=False):
        """Adds a word to the dictionary"""
        if word in self.indices and not overwrite:
            idx = self.indices[word]
            self.count[idx] = self.count[idx] + n
            return idx
        else:
            idx = len(self.symbols)
            self.indices[word] = idx
            self.symbols.append(word)
            self.count.append(n)
            return idx

    def _load_meta(self, lines):
        return 0

    def add_from_file(self, f):
        """
        Loads a pre-existing dictionary from a text file and adds its symbols to this instance.
        """
        if isinstance(f, str):
            try:
                with open(f, "r", encoding="utf-8") as fd:
                    self.add_from_file(fd)
            except FileNotFoundError as fnfe:
                raise fnfe
            except UnicodeError:
                raise Exception("Incorrect encoding detected in {}, please rebuild the dataset".format(f))
            return

        lines = f.readlines()
        indices_start_line = self._load_meta(lines)

        for line in lines[indices_start_line:]:
            try:
                line, field = line.rstrip().rsplit(" ", 1)
                if field == "#fairseq:overwrite":
                    overwrite = True
                    line, field = line.rsplit(" ", 1)
                else:
                    overwrite = False
                count = int(field)
                word = line
                if word in self and not overwrite:
                    raise RuntimeError(
                        "Duplicate word found when loading Dictionary: '{}'. "
                        "Duplicate words can overwrite earlier ones by adding the "
                        "#fairseq:overwrite flag at the end of the corresponding row "
                        "in the dictionary file. If using the Camembert model, please "
                        "download an updated copy of the model file.".format(word)
                    )
                self.add_symbol(word, n=count, overwrite=overwrite)
            except ValueError:
                raise ValueError("Incorrect dictionary format, expected '<token> <cnt> [flags]'")
def rewrite_dict_keys(d):
    # (1) remove the word-breaking symbol, (2) add a word-ending symbol where the word is not broken up
    d2 = dict(
        (re.sub(r"@@$", "", k), v) if k.endswith("@@") else (re.sub(r"$", "</w>", k), v) for k, v in d.items()
    )
    keep_keys = "<s> <pad> </s> <unk>".split()
    # restore the special tokens
    for k in keep_keys:
        del d2[f"{k}</w>"]
        d2[k] = d[k]  # restore
    return d2
def convert_biogpt_checkpoint_to_pytorch(biogpt_checkpoint_path, pytorch_dump_folder_path):
    # prep
    if not os.path.exists(biogpt_checkpoint_path):
        raise ValueError(f"path {biogpt_checkpoint_path} does not exist!")
    os.makedirs(pytorch_dump_folder_path, exist_ok=True)
    print(f"Writing results to {pytorch_dump_folder_path}")

    # handle various types of models
    checkpoint_file = os.path.join(biogpt_checkpoint_path, "checkpoint.pt")
    if not os.path.isfile(checkpoint_file):
        raise ValueError(f"path to the file {checkpoint_file} does not exist!")
    chkpt = torch.load(checkpoint_file, map_location="cpu")

    args = chkpt["cfg"]["model"]

    # dicts
    dict_file = os.path.join(biogpt_checkpoint_path, "dict.txt")
    if not os.path.isfile(dict_file):
        raise ValueError(f"path to the file {dict_file} does not exist!")
    src_dict = Dictionary.load(dict_file)
    src_vocab = rewrite_dict_keys(src_dict.indices)
    src_vocab_size = len(src_vocab)
    src_vocab_file = os.path.join(pytorch_dump_folder_path, VOCAB_FILES_NAMES["vocab_file"])
    print(f"Generating {src_vocab_file} of {src_vocab_size} records")
    with open(src_vocab_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(src_vocab, ensure_ascii=False, indent=json_indent))

    # merges_file (bpecodes)
    bpecodes_file = os.path.join(biogpt_checkpoint_path, "bpecodes")
    if not os.path.isfile(bpecodes_file):
        raise ValueError(f"path to the file {bpecodes_file} does not exist!")

    merges_file = os.path.join(pytorch_dump_folder_path, VOCAB_FILES_NAMES["merges_file"])
    shutil.copyfile(bpecodes_file, merges_file)

    # model config
    biogpt_model_config_file = os.path.join(pytorch_dump_folder_path, "config.json")
    model_conf = {
        "activation_dropout": args["activation_dropout"],
        "architectures": ["BioGptForCausalLM"],
        "attention_probs_dropout_prob": args["attention_dropout"],
        "bos_token_id": 0,
        "eos_token_id": 2,
        "hidden_act": args["activation_fn"],
        "hidden_dropout_prob": args["dropout"],
        "hidden_size": args["decoder_embed_dim"],
        "initializer_range": 0.02,
        "intermediate_size": args["decoder_ffn_embed_dim"],
        "layer_norm_eps": 1e-12,
        "layerdrop": args["decoder_layerdrop"],
        "max_position_embeddings": args["max_target_positions"],
        "model_type": "biogpt",
        "num_attention_heads": args["decoder_attention_heads"],
        "num_hidden_layers": args["decoder_layers"],
        "pad_token_id": 1,
        "scale_embedding": not args["no_scale_embedding"],
        "tie_word_embeddings": args["share_decoder_input_output_embed"],
        "vocab_size": src_vocab_size,
    }

    # good hparam defaults to start with
    print(f"Generating {biogpt_model_config_file}")
    with open(biogpt_model_config_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(model_conf, ensure_ascii=False, indent=json_indent))

    # tokenizer config
    biogpt_tokenizer_config_file = os.path.join(pytorch_dump_folder_path, TOKENIZER_CONFIG_FILE)
    tokenizer_conf = {
        "bos_token": "<s>",
        "eos_token": "</s>",
        "model_max_length": 1024,
        "pad_token": "<pad>",
        "special_tokens_map_file": None,
        "tokenizer_class": "BioGptTokenizer",
        "unk_token": "<unk>",
    }

    print(f"Generating {biogpt_tokenizer_config_file}")
    with open(biogpt_tokenizer_config_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(tokenizer_conf, ensure_ascii=False, indent=json_indent))

    # model
    model_state_dict = chkpt["model"]

    # remove unneeded keys
    ignore_keys = [
        "decoder.version",
    ]
    for k in ignore_keys:
        model_state_dict.pop(k, None)

    layer_names = list(model_state_dict.keys())
    for layer_name in layer_names:
        if layer_name.endswith("output_projection.weight"):
            model_state_dict["output_projection.weight"] = model_state_dict.pop(layer_name)
        else:
            model_state_dict[layer_name.replace("decoder", "biogpt")] = model_state_dict.pop(layer_name)

    config = BioGptConfig.from_pretrained(pytorch_dump_folder_path)
    model_new = BioGptForCausalLM(config)

    # check that it loads ok
    model_new.load_state_dict(model_state_dict)

    # save
    pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME)
    print(f"Generating {pytorch_weights_dump_path}")
    torch.save(model_state_dict, pytorch_weights_dump_path)

    print("Conversion is done!")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--biogpt_checkpoint_path''',
default=None,
type=str,
required=True,
help=(
'''Path to the official PyTorch checkpoint file which is expected to reside in the dump dir with dicts,'''
''' bpecodes, etc.'''
),
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
    args = parser.parse_args()
convert_biogpt_checkpoint_to_pytorch(args.biogpt_checkpoint_path, args.pytorch_dump_folder_path)
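    # Example invocation (hypothetical paths, shown for illustration only):
    #   python convert_biogpt_original_pytorch_checkpoint_to_pytorch.py \
    #       --biogpt_checkpoint_path /path/to/fairseq_checkpoint_dir \
    #       --pytorch_dump_folder_path /path/to/hf_output_dir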
| 505
| 0
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/nllb-moe-54B": "https://huggingface.co/facebook/nllb-moe-54b/resolve/main/config.json",
}


class NllbMoeConfig(PretrainedConfig):
    model_type = "nllb-moe"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}
    def __init__(
        self,
        vocab_size=128112,
        max_position_embeddings=1024,
        encoder_layers=12,
        encoder_ffn_dim=4096,
        encoder_attention_heads=16,
        decoder_layers=12,
        decoder_ffn_dim=4096,
        decoder_attention_heads=16,
        encoder_layerdrop=0.05,
        decoder_layerdrop=0.05,
        use_cache=True,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=1024,
        dropout=0.1,
        attention_dropout=0.1,
        activation_dropout=0.0,
        init_std=0.02,
        decoder_start_token_id=2,
        scale_embedding=True,
        router_bias=False,
        router_dtype="float32",
        router_ignore_padding_tokens=False,
        num_experts=128,
        expert_capacity=64,
        encoder_sparse_step=4,
        decoder_sparse_step=4,
        router_z_loss_coef=0.001,
        router_aux_loss_coef=0.001,
        second_expert_policy="all",
        normalize_router_prob_before_dropping=False,
        batch_prioritized_routing=False,
        moe_eval_capacity_token_fraction=1.0,
        moe_token_dropout=0.2,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        output_router_logits=False,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.router_z_loss_coef = router_z_loss_coef
        self.router_aux_loss_coef = router_aux_loss_coef
        self.decoder_sparse_step = decoder_sparse_step
        self.encoder_sparse_step = encoder_sparse_step
        self.num_experts = num_experts
        self.expert_capacity = expert_capacity
        self.router_bias = router_bias
        if router_dtype not in ["float32", "float16", "bfloat16"]:
            raise ValueError(f"`router_dtype` must be one of 'float32', 'float16' or 'bfloat16', got {router_dtype}")
        self.router_dtype = router_dtype
        self.router_ignore_padding_tokens = router_ignore_padding_tokens
        self.batch_prioritized_routing = batch_prioritized_routing
        self.second_expert_policy = second_expert_policy
        self.normalize_router_prob_before_dropping = normalize_router_prob_before_dropping
        self.moe_eval_capacity_token_fraction = moe_eval_capacity_token_fraction
        self.moe_token_dropout = moe_token_dropout
        self.output_router_logits = output_router_logits
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
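

# Minimal usage sketch (added for illustration, not part of the original module):
# a deliberately tiny config rather than the released 54B hyper-parameters; every
# field not passed falls back to the defaults defined above.
if __name__ == "__main__":
    tiny_config = NllbMoeConfig(d_model=64, encoder_layers=2, decoder_layers=2, num_experts=4)
    print(tiny_config.num_experts, tiny_config.router_dtype)  # -> 4 float32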
| 508
|
from typing import List, Optional
import numpy as np
from ...processing_utils import ProcessorMixin
from ...utils import to_numpy
class MusicgenProcessor(ProcessorMixin):
    r"""
    Constructs a processor which wraps an EnCodec feature extractor and a T5 tokenizer into a
    single processor class.
    """

    feature_extractor_class = "EncodecFeatureExtractor"
    tokenizer_class = ("T5Tokenizer", "T5TokenizerFast")

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False

    def get_decoder_prompt_ids(self, task=None, language=None, no_timestamps=True):
        return self.tokenizer.get_decoder_prompt_ids(task=task, language=language, no_timestamps=no_timestamps)

    def __call__(self, *args, **kwargs):
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)

        audio = kwargs.pop("audio", None)
        sampling_rate = kwargs.pop("sampling_rate", None)
        text = kwargs.pop("text", None)
        if len(args) > 0:
            audio = args[0]
            args = args[1:]

        if audio is None and text is None:
            raise ValueError("You need to specify either an `audio` or `text` input to process.")

        if text is not None:
            inputs = self.tokenizer(text, **kwargs)

        if audio is not None:
            audio_inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)

        if audio is None:
            return inputs
        elif text is None:
            return audio_inputs
        else:
            inputs["input_values"] = audio_inputs["input_values"]
            if "padding_mask" in audio_inputs:
                inputs["padding_mask"] = audio_inputs["padding_mask"]
            return inputs

    def batch_decode(self, *args, **kwargs):
        audio_values = kwargs.pop("audio", None)
        padding_mask = kwargs.pop("padding_mask", None)

        if len(args) > 0:
            audio_values = args[0]
            args = args[1:]

        if audio_values is not None:
            return self._decode_audio(audio_values, padding_mask=padding_mask)
        else:
            return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    def _decode_audio(self, audio_values, padding_mask=None):
        audio_values = to_numpy(audio_values)
        bsz, channels, seq_len = audio_values.shape

        if padding_mask is None:
            return list(audio_values)

        padding_mask = to_numpy(padding_mask)

        # match the sequence length of the padding mask to the generated audio arrays by padding with the **non-padding**
        # token (so that the generated audio values are **not** treated as padded tokens)
        difference = seq_len - padding_mask.shape[-1]
        padding_value = 1 - self.feature_extractor.padding_value
        padding_mask = np.pad(padding_mask, ((0, 0), (0, difference)), "constant", constant_values=padding_value)

        audio_values = audio_values.tolist()
        for i in range(bsz):
            sliced_audio = np.asarray(audio_values[i])[
                padding_mask[i][None, :] != self.feature_extractor.padding_value
            ]
            audio_values[i] = sliced_audio.reshape(channels, -1)

        return audio_values
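

# Illustrative sketch (assumes an instantiated `processor` with a loaded feature
# extractor and tokenizer; not part of the original module): decoding strips padding
# per example, so batched generations of different true lengths come back as a list
# of (channels, length) arrays.
#
#   audio_list = processor.batch_decode(audio=generated_audio, padding_mask=padding_mask)
#   text = processor.batch_decode(token_ids, skip_special_tokens=True)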
| 145
| 0
|
import warnings
from ...utils import logging
from .image_processing_videomae import VideoMAEImageProcessor
logger = logging.get_logger(__name__)


class VideoMAEFeatureExtractor(VideoMAEImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class VideoMAEFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use VideoMAEImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 69
|
import gc
import unittest
import numpy as np
import torch
from torch.backends.cuda import sdp_kernel
from diffusers import (
CMStochasticIterativeScheduler,
ConsistencyModelPipeline,
UNetaDModel,
)
from diffusers.utils import randn_tensor, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_a, require_torch_gpu
from ..pipeline_params import UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS, UNCONDITIONAL_IMAGE_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class ConsistencyModelPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = ConsistencyModelPipeline
    params = UNCONDITIONAL_IMAGE_GENERATION_PARAMS
    batch_params = UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS

    # Override required_optional_params to remove num_images_per_prompt
    required_optional_params = frozenset(
        [
            "num_inference_steps",
            "generator",
            "latents",
            "output_type",
            "return_dict",
            "callback",
            "callback_steps",
        ]
    )
    @property
    def dummy_uncond_unet(self):
        unet = UNetaDModel.from_pretrained(
            "diffusers/consistency-models-test",
            subfolder="test_unet",
        )
        return unet

    @property
    def dummy_cond_unet(self):
        unet = UNetaDModel.from_pretrained(
            "diffusers/consistency-models-test",
            subfolder="test_unet_class_cond",
        )
        return unet

    def get_dummy_components(self, class_cond=False):
        if class_cond:
            unet = self.dummy_cond_unet
        else:
            unet = self.dummy_uncond_unet

        # Default to CM multistep sampler
        scheduler = CMStochasticIterativeScheduler(
            num_train_timesteps=40,
            sigma_min=0.002,
            sigma_max=80.0,
        )

        components = {
            "unet": unet,
            "scheduler": scheduler,
        }

        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        inputs = {
            "batch_size": 1,
            "num_inference_steps": None,
            "timesteps": [22, 0],
            "generator": generator,
            "output_type": "np",
        }

        return inputs
    def test_consistency_model_pipeline_multistep(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        pipe = ConsistencyModelPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        assert image.shape == (1, 32, 32, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.3572, 0.6273, 0.4031, 0.3961, 0.4321, 0.5730, 0.5266, 0.4780, 0.5004])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_consistency_model_pipeline_multistep_class_cond(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components(class_cond=True)
        pipe = ConsistencyModelPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs["class_labels"] = 0
        image = pipe(**inputs).images
        assert image.shape == (1, 32, 32, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.3572, 0.6273, 0.4031, 0.3961, 0.4321, 0.5730, 0.5266, 0.4780, 0.5004])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_consistency_model_pipeline_onestep(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        pipe = ConsistencyModelPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs["num_inference_steps"] = 1
        inputs["timesteps"] = None
        image = pipe(**inputs).images
        assert image.shape == (1, 32, 32, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.5004, 0.5004, 0.4994, 0.5008, 0.4976, 0.5018, 0.4990, 0.4982, 0.4987])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_consistency_model_pipeline_onestep_class_cond(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components(class_cond=True)
        pipe = ConsistencyModelPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs["num_inference_steps"] = 1
        inputs["timesteps"] = None
        inputs["class_labels"] = 0
        image = pipe(**inputs).images
        assert image.shape == (1, 32, 32, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.5004, 0.5004, 0.4994, 0.5008, 0.4976, 0.5018, 0.4990, 0.4982, 0.4987])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
@slow
@require_torch_gpu
class ConsistencyModelPipelineSlowTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, seed=0, get_fixed_latents=False, device="cpu", dtype=torch.float32, shape=(1, 3, 64, 64)):
        generator = torch.manual_seed(seed)

        inputs = {
            "num_inference_steps": None,
            "timesteps": [22, 0],
            "class_labels": 0,
            "generator": generator,
            "output_type": "np",
        }

        if get_fixed_latents:
            latents = self.get_fixed_latents(seed=seed, device=device, dtype=dtype, shape=shape)
            inputs["latents"] = latents

        return inputs

    def get_fixed_latents(self, seed=0, device="cpu", dtype=torch.float32, shape=(1, 3, 64, 64)):
        if type(device) == str:
            device = torch.device(device)
        generator = torch.Generator(device=device).manual_seed(seed)
        latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        return latents

    def test_consistency_model_cd_multistep(self):
        unet = UNetaDModel.from_pretrained("diffusers/consistency_models", subfolder="diffusers_cd_imagenet64_l2")
        scheduler = CMStochasticIterativeScheduler(
            num_train_timesteps=40,
            sigma_min=0.002,
            sigma_max=80.0,
        )
        pipe = ConsistencyModelPipeline(unet=unet, scheduler=scheduler)
        pipe.to(torch_device=torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs()
        image = pipe(**inputs).images
        assert image.shape == (1, 64, 64, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.0888, 0.0881, 0.0666, 0.0479, 0.0292, 0.0195, 0.0201, 0.0163, 0.0254])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2

    def test_consistency_model_cd_onestep(self):
        unet = UNetaDModel.from_pretrained("diffusers/consistency_models", subfolder="diffusers_cd_imagenet64_l2")
        scheduler = CMStochasticIterativeScheduler(
            num_train_timesteps=40,
            sigma_min=0.002,
            sigma_max=80.0,
        )
        pipe = ConsistencyModelPipeline(unet=unet, scheduler=scheduler)
        pipe.to(torch_device=torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs()
        inputs["num_inference_steps"] = 1
        inputs["timesteps"] = None
        image = pipe(**inputs).images
        assert image.shape == (1, 64, 64, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.0340, 0.0152, 0.0063, 0.0267, 0.0221, 0.0107, 0.0416, 0.0186, 0.0217])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2

    @require_torch_a
    def test_consistency_model_cd_multistep_flash_attn(self):
        unet = UNetaDModel.from_pretrained("diffusers/consistency_models", subfolder="diffusers_cd_imagenet64_l2")
        scheduler = CMStochasticIterativeScheduler(
            num_train_timesteps=40,
            sigma_min=0.002,
            sigma_max=80.0,
        )
        pipe = ConsistencyModelPipeline(unet=unet, scheduler=scheduler)
        pipe.to(torch_device=torch_device, torch_dtype=torch.float16)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(get_fixed_latents=True, device=torch_device)

        # Ensure usage of flash attention in torch 2.0
        with sdp_kernel(enable_flash=True, enable_math=False, enable_mem_efficient=False):
            image = pipe(**inputs).images
        assert image.shape == (1, 64, 64, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.1875, 0.1428, 0.1289, 0.2151, 0.2092, 0.1477, 0.1877, 0.1641, 0.1353])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    @require_torch_a
    def test_consistency_model_cd_onestep_flash_attn(self):
        unet = UNetaDModel.from_pretrained("diffusers/consistency_models", subfolder="diffusers_cd_imagenet64_l2")
        scheduler = CMStochasticIterativeScheduler(
            num_train_timesteps=40,
            sigma_min=0.002,
            sigma_max=80.0,
        )
        pipe = ConsistencyModelPipeline(unet=unet, scheduler=scheduler)
        pipe.to(torch_device=torch_device, torch_dtype=torch.float16)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(get_fixed_latents=True, device=torch_device)
        inputs["num_inference_steps"] = 1
        inputs["timesteps"] = None

        # Ensure usage of flash attention in torch 2.0
        with sdp_kernel(enable_flash=True, enable_math=False, enable_mem_efficient=False):
            image = pipe(**inputs).images
        assert image.shape == (1, 64, 64, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.1663, 0.1948, 0.2275, 0.1680, 0.1204, 0.1245, 0.1858, 0.1338, 0.2095])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
| 69
| 1
|
import os
import string
import sys
ARROW_KEY_FLAG = 1 << 8

KEYMAP = {
'''tab''': ord('''\t'''),
'''newline''': ord('''\r'''),
'''esc''': 27,
'''up''': 65 + ARROW_KEY_FLAG,
'''down''': 66 + ARROW_KEY_FLAG,
'''right''': 67 + ARROW_KEY_FLAG,
'''left''': 68 + ARROW_KEY_FLAG,
'''mod_int''': 91,
'''undefined''': sys.maxsize,
'''interrupt''': 3,
'''insert''': 50,
'''delete''': 51,
'''pg_up''': 53,
'''pg_down''': 54,
}
KEYMAP["arrow_begin"] = KEYMAP["up"]
KEYMAP["arrow_end"] = KEYMAP["left"]
if sys.platform == "win32":
    WIN_CH_BUFFER = []
    WIN_KEYMAP = {
B'''\xe0H''': KEYMAP['''up'''] - ARROW_KEY_FLAG,
B'''\x00H''': KEYMAP['''up'''] - ARROW_KEY_FLAG,
B'''\xe0P''': KEYMAP['''down'''] - ARROW_KEY_FLAG,
B'''\x00P''': KEYMAP['''down'''] - ARROW_KEY_FLAG,
B'''\xe0M''': KEYMAP['''right'''] - ARROW_KEY_FLAG,
B'''\x00M''': KEYMAP['''right'''] - ARROW_KEY_FLAG,
B'''\xe0K''': KEYMAP['''left'''] - ARROW_KEY_FLAG,
B'''\x00K''': KEYMAP['''left'''] - ARROW_KEY_FLAG,
}
for i in range(10):
    KEYMAP[str(i)] = ord(str(i))
def get_raw_chars():
    """Gets raw characters from inputs"""
    if os.name == "nt":
        import msvcrt

        encoding = "mbcs"
        # Flush the keyboard buffer
        while msvcrt.kbhit():
            msvcrt.getch()
        if len(WIN_CH_BUFFER) == 0:
            # Read the keystroke
            ch = msvcrt.getch()
            # If it is a prefix char, get second part
            if ch in (b"\x00", b"\xe0"):
                ch2 = ch + msvcrt.getch()
                # Translate actual Win chars to bullet char types
                try:
                    chx = chr(WIN_KEYMAP[ch2])
                    WIN_CH_BUFFER.append(chr(KEYMAP["mod_int"]))
                    WIN_CH_BUFFER.append(chx)
                    if ord(chx) in (
                        KEYMAP["insert"] - 1 << 9,
                        KEYMAP["delete"] - 1 << 9,
                        KEYMAP["pg_up"] - 1 << 9,
                        KEYMAP["pg_down"] - 1 << 9,
                    ):
                        WIN_CH_BUFFER.append(chr(126))
                    ch = chr(KEYMAP["esc"])
                except KeyError:
                    ch = ch2[1]
            else:
                ch = ch.decode(encoding)
        else:
            ch = WIN_CH_BUFFER.pop(0)
    elif os.name == "posix":
        import termios
        import tty

        fd = sys.stdin.fileno()
        old_settings = termios.tcgetattr(fd)
        try:
            tty.setraw(fd)
            ch = sys.stdin.read(1)
        finally:
            termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)
    return ch
def get_character():
    """Gets a character from the keyboard and returns the key code"""
    char = get_raw_chars()
    if ord(char) in [KEYMAP["interrupt"], KEYMAP["newline"]]:
        return char

    elif ord(char) == KEYMAP["esc"]:
        combo = get_raw_chars()
        if ord(combo) == KEYMAP["mod_int"]:
            key = get_raw_chars()
            if ord(key) >= KEYMAP["arrow_begin"] - ARROW_KEY_FLAG and ord(key) <= KEYMAP["arrow_end"] - ARROW_KEY_FLAG:
                return chr(ord(key) + ARROW_KEY_FLAG)
            else:
                return KEYMAP["undefined"]
        else:
            return get_raw_chars()

    else:
        if char in string.printable:
            return char
        else:
            return KEYMAP["undefined"]
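

# Minimal usage sketch (illustrative, not part of the original module): blocks
# until a key is pressed; arrow keys come back with ARROW_KEY_FLAG folded in,
# printable keys as themselves.
if __name__ == "__main__":
    pressed = get_character()
    print(f"got: {pressed!r}")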
| 568
|
from .glue import glue_convert_examples_to_features, glue_output_modes, glue_processors, glue_tasks_num_labels
from .squad import SquadExample, SquadFeatures, SquadV1Processor, SquadV2Processor, squad_convert_examples_to_features
from .utils import DataProcessor, InputExample, InputFeatures, SingleSentenceClassificationProcessor
from .xnli import xnli_output_modes, xnli_processors, xnli_tasks_num_labels
| 568
| 1
|
import json
import os
import unittest
from transformers.models.roc_bert.tokenization_roc_bert import (
VOCAB_FILES_NAMES,
RoCBertBasicTokenizer,
RoCBertTokenizer,
RoCBertWordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class RoCBertTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = RoCBertTokenizer
    rust_tokenizer_class = None
    test_rust_tokenizer = False
    space_between_special_tokens = True
    from_pretrained_filter = filter_non_english
    def setUp(self):
        super().setUp()

        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "你", "好", "是", "谁", "a", "b", "c", "d"]
        word_shape = {}
        word_pronunciation = {}
        for i, value in enumerate(vocab_tokens):
            word_shape[value] = i
            word_pronunciation[value] = i
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.word_shape_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["word_shape_file"])
        self.word_pronunciation_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["word_pronunciation_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
        with open(self.word_shape_file, "w", encoding="utf-8") as word_shape_writer:
            json.dump(word_shape, word_shape_writer, ensure_ascii=False)
        with open(self.word_pronunciation_file, "w", encoding="utf-8") as word_pronunciation_writer:
            json.dump(word_pronunciation, word_pronunciation_writer, ensure_ascii=False)
    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file, self.word_shape_file, self.word_pronunciation_file)

        tokens = tokenizer.tokenize("你好[SEP]你是谁")
        self.assertListEqual(tokens, ["你", "好", "[SEP]", "你", "是", "谁"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [5, 6, 2, 5, 7, 8])
        self.assertListEqual(tokenizer.convert_tokens_to_shape_ids(tokens), [5, 6, 2, 5, 7, 8])
        self.assertListEqual(tokenizer.convert_tokens_to_pronunciation_ids(tokens), [5, 6, 2, 5, 7, 8])
    def test_chinese(self):
        tokenizer = RoCBertBasicTokenizer()
        self.assertListEqual(tokenizer.tokenize("ah\u535A\u63A8zz"), ["ah", "\u535A", "\u63A8", "zz"])

    def test_basic_tokenizer_lower(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how  \n Are yoU?  "), ["hello", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_lower_strip_accents_false(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=True, strip_accents=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how  \n Are yoU?  "), ["hällo", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["h\u00E9llo"])

    def test_basic_tokenizer_lower_strip_accents_true(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=True, strip_accents=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how  \n Are yoU?  "), ["hallo", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_lower_strip_accents_default(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how  \n Are yoU?  "), ["hallo", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_no_lower(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how  \n Are yoU?  "), ["HeLLo", "!", "how", "Are", "yoU", "?"]
        )

    def test_basic_tokenizer_no_lower_strip_accents_false(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=False, strip_accents=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how  \n Are yoU?  "), ["HäLLo", "!", "how", "Are", "yoU", "?"]
        )

    def test_basic_tokenizer_no_lower_strip_accents_true(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=False, strip_accents=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how  \n Are yoU?  "), ["HaLLo", "!", "how", "Are", "yoU", "?"]
        )

    def test_basic_tokenizer_respects_never_split_tokens(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=False, never_split=["[UNK]"])
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how  \n Are yoU? [UNK]"), ["HeLLo", "!", "how", "Are", "yoU", "?", "[UNK]"]
        )

    def test_wordpiece_tokenizer(self):
        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn", "##ing"]

        vocab = {}
        for i, token in enumerate(vocab_tokens):
            vocab[token] = i
        tokenizer = RoCBertWordpieceTokenizer(vocab=vocab, unk_token="[UNK]")

        self.assertListEqual(tokenizer.tokenize(""), [])
        self.assertListEqual(tokenizer.tokenize("unwanted running"), ["un", "##want", "##ed", "runn", "##ing"])
        self.assertListEqual(tokenizer.tokenize("unwantedX running"), ["[UNK]", "runn", "##ing"])
    def test_is_whitespace(self):
        self.assertTrue(_is_whitespace(" "))
        self.assertTrue(_is_whitespace("\t"))
        self.assertTrue(_is_whitespace("\r"))
        self.assertTrue(_is_whitespace("\n"))
        self.assertTrue(_is_whitespace("\u00A0"))

        self.assertFalse(_is_whitespace("A"))
        self.assertFalse(_is_whitespace("-"))

    def test_is_control(self):
        self.assertTrue(_is_control("\u0005"))

        self.assertFalse(_is_control("A"))
        self.assertFalse(_is_control(" "))
        self.assertFalse(_is_control("\t"))
        self.assertFalse(_is_control("\r"))

    def test_is_punctuation(self):
        self.assertTrue(_is_punctuation("-"))
        self.assertTrue(_is_punctuation("$"))
        self.assertTrue(_is_punctuation("`"))
        self.assertTrue(_is_punctuation("."))

        self.assertFalse(_is_punctuation("A"))
        self.assertFalse(_is_punctuation(" "))
    def test_clean_text(self):
        tokenizer = self.get_tokenizer()

        # Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
        self.assertListEqual([tokenizer.tokenize(t) for t in ["Test", "\xad", "test"]], [["[UNK]"], [], ["[UNK]"]])

        if self.test_rust_tokenizer:
            rust_tokenizer = self.get_rust_tokenizer()
            self.assertListEqual(
                [rust_tokenizer.tokenize(t) for t in ["Test", "\xad", "test"]], [["[UNK]"], [], ["[UNK]"]]
            )
    def test_offsets_with_special_characters(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                sentence = f"A, naïve {tokenizer_r.mask_token} AllenNLP sentence."
                tokens = tokenizer_r.encode_plus(
                    sentence,
                    return_attention_mask=False,
                    return_token_type_ids=False,
                    return_offsets_mapping=True,
                    add_special_tokens=True,
                )

                do_lower_case = tokenizer_r.do_lower_case if hasattr(tokenizer_r, "do_lower_case") else False
                expected_results = (
[
((0, 0), tokenizer_r.cls_token),
((0, 1), """A"""),
((1, 2), ""","""),
((3, 5), """na"""),
((5, 6), """##ï"""),
((6, 8), """##ve"""),
((9, 15), tokenizer_r.mask_token),
((16, 21), """Allen"""),
((21, 23), """##NL"""),
((23, 24), """##P"""),
((25, 33), """sentence"""),
((33, 34), """."""),
((0, 0), tokenizer_r.sep_token),
]
if not do_lower_case
else [
((0, 0), tokenizer_r.cls_token),
((0, 1), """a"""),
((1, 2), ""","""),
((3, 8), """naive"""),
((9, 15), tokenizer_r.mask_token),
((16, 21), """allen"""),
((21, 23), """##nl"""),
((23, 24), """##p"""),
((25, 33), """sentence"""),
((33, 34), """."""),
((0, 0), tokenizer_r.sep_token),
]
)
self.assertEqual(
[e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens["""input_ids"""] ) )
self.assertEqual([e[0] for e in expected_results] , tokens["""offset_mapping"""] )
    def test_change_tokenize_chinese_chars(self):
        list_of_commun_chinese_char = ["的", "人", "有"]
        text_with_chinese_char = "".join(list_of_commun_chinese_char)
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                kwargs["tokenize_chinese_chars"] = True
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                ids_without_spe_char_p = tokenizer_p.encode(text_with_chinese_char, add_special_tokens=False)
                ids_without_spe_char_r = tokenizer_r.encode(text_with_chinese_char, add_special_tokens=False)

                tokens_without_spe_char_r = tokenizer_r.convert_ids_to_tokens(ids_without_spe_char_r)
                tokens_without_spe_char_p = tokenizer_p.convert_ids_to_tokens(ids_without_spe_char_p)

                # it is expected that each Chinese character is not preceded by "##"
                self.assertListEqual(tokens_without_spe_char_p, list_of_commun_chinese_char)
                self.assertListEqual(tokens_without_spe_char_r, list_of_commun_chinese_char)

                kwargs["tokenize_chinese_chars"] = False
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                ids_without_spe_char_r = tokenizer_r.encode(text_with_chinese_char, add_special_tokens=False)
                ids_without_spe_char_p = tokenizer_p.encode(text_with_chinese_char, add_special_tokens=False)

                tokens_without_spe_char_r = tokenizer_r.convert_ids_to_tokens(ids_without_spe_char_r)
                tokens_without_spe_char_p = tokenizer_p.convert_ids_to_tokens(ids_without_spe_char_p)

                # it is expected that only the first Chinese character is not preceded by "##".
                expected_tokens = [
                    f"##{token}" if idx != 0 else token for idx, token in enumerate(list_of_commun_chinese_char)
                ]
                self.assertListEqual(tokens_without_spe_char_p, expected_tokens)
                self.assertListEqual(tokens_without_spe_char_r, expected_tokens)
    @slow
    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class(self.vocab_file, self.word_shape_file, self.word_pronunciation_file)

        text = tokenizer.encode("你好", add_special_tokens=False)
        text_2 = tokenizer.encode("你是谁", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == [1] + text + [2]
        assert encoded_pair == [1] + text + [2] + text_2 + [2]

    def test_prepare_for_model(self):
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                string_sequence = "你好,你是谁"
                tokens = tokenizer.tokenize(string_sequence)
                tokens_ids = tokenizer.convert_tokens_to_ids(tokens)
                tokens_shape_ids = tokenizer.convert_tokens_to_shape_ids(tokens)
                tokens_proun_ids = tokenizer.convert_tokens_to_pronunciation_ids(tokens)
                prepared_input_dict = tokenizer.prepare_for_model(
                    tokens_ids, tokens_shape_ids, tokens_proun_ids, add_special_tokens=True
                )

                input_dict = tokenizer.encode_plus(string_sequence, add_special_tokens=True)

                self.assertEqual(input_dict, prepared_input_dict)
| 379
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/timesformer": "https://huggingface.co/facebook/timesformer/resolve/main/config.json",
}


class TimesformerConfig(PretrainedConfig):
    model_type = "timesformer"

    def __init__(
        self,
        image_size=224,
        patch_size=16,
        num_channels=3,
        num_frames=8,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-6,
        qkv_bias=True,
        attention_type="divided_space_time",
        drop_path_rate=0,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.num_frames = num_frames
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.qkv_bias = qkv_bias
        self.attention_type = attention_type
        self.drop_path_rate = drop_path_rate
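

# Illustrative instantiation (added example, not part of the original module):
# overriding only the clip length keeps every other hyper-parameter at the
# defaults defined above.
if __name__ == "__main__":
    demo_config = TimesformerConfig(num_frames=16)
    print(demo_config.num_frames, demo_config.attention_type)  # -> 16 divided_space_time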
| 379
| 1
|
"""simple docstring"""
import unittest
import numpy as np
import torch
from diffusers import KarrasVePipeline, KarrasVeScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class KarrasVePipelineFastTests(unittest.TestCase):
    @property
    def dummy_uncond_unet(self):
        torch.manual_seed(0)
        model = UNetaDModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=3,
            out_channels=3,
            down_block_types=("DownBlock2D", "AttnDownBlock2D"),
            up_block_types=("AttnUpBlock2D", "UpBlock2D"),
        )
        return model

    def test_inference(self):
        unet = self.dummy_uncond_unet
        scheduler = KarrasVeScheduler()

        pipe = KarrasVePipeline(unet=unet, scheduler=scheduler)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = pipe(num_inference_steps=2, generator=generator, output_type="numpy").images

        generator = torch.manual_seed(0)
        image_from_tuple = pipe(num_inference_steps=2, generator=generator, output_type="numpy", return_dict=False)[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
@slow
@require_torch
class KarrasVePipelineIntegrationTests(unittest.TestCase):
    def test_full_inference(self):
        model_id = "google/ncsnpp-celebahq-256"
        model = UNetaDModel.from_pretrained(model_id)
        scheduler = KarrasVeScheduler()

        pipe = KarrasVePipeline(unet=model, scheduler=scheduler)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = pipe(num_inference_steps=20, generator=generator, output_type="numpy").images

        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.578, 0.5811, 0.5924, 0.5809, 0.587, 0.5886, 0.5861, 0.5802, 0.586])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
| 599
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"configuration_blip_2": [
"BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP",
"Blip2Config",
"Blip2QFormerConfig",
"Blip2VisionConfig",
],
"processing_blip_2": ["Blip2Processor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_blip_2"] = [
"BLIP_2_PRETRAINED_MODEL_ARCHIVE_LIST",
"Blip2Model",
"Blip2QFormerModel",
"Blip2PreTrainedModel",
"Blip2ForConditionalGeneration",
"Blip2VisionModel",
]
if TYPE_CHECKING:
    from .configuration_blip_2 import (
        BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Blip2Config,
        Blip2QFormerConfig,
        Blip2VisionConfig,
    )
    from .processing_blip_2 import Blip2Processor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_blip_2 import (
            BLIP_2_PRETRAINED_MODEL_ARCHIVE_LIST,
            Blip2ForConditionalGeneration,
            Blip2Model,
            Blip2PreTrainedModel,
            Blip2QFormerModel,
            Blip2VisionModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
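    # Illustrative note (added, not part of the original module): with the
    # `sys.modules` swap above, `from transformers.models.blip_2 import Blip2Processor`
    # resolves lazily, so the heavy torch-backed symbols are only imported on first access.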
| 599
| 1
|
import collections
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import (
is_pt_flax_cross_test,
require_flax,
require_torch,
require_vision,
slow,
torch_device,
)
from transformers.utils import is_flax_available, is_torch_available, is_vision_available
from ...test_modeling_flax_common import floats_tensor, ids_tensor, random_attention_mask
from ..bert.test_modeling_flax_bert import FlaxBertModelTester
from ..clip.test_modeling_flax_clip import FlaxCLIPVisionModelTester
from ..vit.test_modeling_flax_vit import FlaxViTModelTester
if is_flax_available():
from transformers import (
FlaxBertModel,
FlaxCLIPVisionModel,
FlaxVisionTextDualEncoderModel,
FlaxViTModel,
VisionTextDualEncoderConfig,
VisionTextDualEncoderProcessor,
)
from transformers.modeling_flax_pytorch_utils import (
convert_pytorch_state_dict_to_flax,
load_flax_weights_in_pytorch_model,
)
if is_torch_available():
import torch
from transformers import VisionTextDualEncoderModel
if is_vision_available():
from PIL import Image
def to_atuple(x):
    """Return `x` unchanged if it is already iterable, otherwise duplicate it into a pair."""
    if isinstance(x, collections.abc.Iterable):
        return x
    return (x, x)
@require_flax
class VisionTextDualEncoderMixin:
    def get_vision_text_model(self, vision_config, text_config):
        pass

    def prepare_config_and_inputs(self):
        pass

    def get_pretrained_model_and_inputs(self):
        pass

    def assert_almost_equals(self, a: np.ndarray, b: np.ndarray, tol: float):
        diff = np.abs(a - b).max()
        self.assertLessEqual(diff, tol, f"Difference between torch and flax is {diff} (>= {tol}).")
def UpperCAmelCase_ ( self :List[str] , lowerCamelCase :Union[str, Any] , lowerCamelCase :Optional[Any] , lowerCamelCase :int , lowerCamelCase :List[Any] , lowerCamelCase :str=None , **lowerCamelCase :str ) -> Tuple:
UpperCAmelCase__ = VisionTextDualEncoderConfig.from_vision_text_configs(lowerCamelCase , lowerCamelCase )
UpperCAmelCase__ = FlaxVisionTextDualEncoderModel(lowerCamelCase )
UpperCAmelCase__ = model(input_ids=lowerCamelCase , pixel_values=lowerCamelCase , attention_mask=lowerCamelCase )
self.assertEqual(output["text_embeds"].shape , (input_ids.shape[0], config.projection_dim) )
self.assertEqual(output["image_embeds"].shape , (pixel_values.shape[0], config.projection_dim) )
def UpperCAmelCase_ ( self :int , lowerCamelCase :int , lowerCamelCase :Tuple , lowerCamelCase :Optional[int] , lowerCamelCase :Any , lowerCamelCase :Optional[int]=None , **lowerCamelCase :Optional[Any] ) -> Optional[int]:
UpperCAmelCase__ , UpperCAmelCase__ = self.get_vision_text_model(lowerCamelCase , lowerCamelCase )
UpperCAmelCase__ = {"vision_model": vision_model, "text_model": text_model}
UpperCAmelCase__ = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**lowerCamelCase )
UpperCAmelCase__ = model(input_ids=lowerCamelCase , pixel_values=lowerCamelCase , attention_mask=lowerCamelCase )
self.assertEqual(output["text_embeds"].shape , (input_ids.shape[0], model.config.projection_dim) )
self.assertEqual(output["image_embeds"].shape , (pixel_values.shape[0], model.config.projection_dim) )
def UpperCAmelCase_ ( self :Union[str, Any] , lowerCamelCase :Optional[int] , lowerCamelCase :Dict , lowerCamelCase :List[str] , lowerCamelCase :Dict , lowerCamelCase :Optional[Any]=None , **lowerCamelCase :int ) -> Tuple:
UpperCAmelCase__ , UpperCAmelCase__ = self.get_vision_text_model(lowerCamelCase , lowerCamelCase )
UpperCAmelCase__ = {"vision_model": vision_model, "text_model": text_model}
UpperCAmelCase__ = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**lowerCamelCase )
UpperCAmelCase__ = model(input_ids=lowerCamelCase , pixel_values=lowerCamelCase , attention_mask=lowerCamelCase )
UpperCAmelCase__ = output[0]
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(lowerCamelCase )
UpperCAmelCase__ = FlaxVisionTextDualEncoderModel.from_pretrained(lowerCamelCase )
UpperCAmelCase__ = model(input_ids=lowerCamelCase , pixel_values=lowerCamelCase , attention_mask=lowerCamelCase )
UpperCAmelCase__ = after_output[0]
UpperCAmelCase__ = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(lowerCamelCase , 1e-3 )
def UpperCAmelCase_ ( self :List[str] , lowerCamelCase :List[Any] , lowerCamelCase :List[Any] , lowerCamelCase :Any , lowerCamelCase :Union[str, Any] , lowerCamelCase :List[Any]=None , **lowerCamelCase :Tuple ) -> Tuple:
UpperCAmelCase__ , UpperCAmelCase__ = self.get_vision_text_model(lowerCamelCase , lowerCamelCase )
UpperCAmelCase__ = {"vision_model": vision_model, "text_model": text_model}
UpperCAmelCase__ = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**lowerCamelCase )
UpperCAmelCase__ = model(
input_ids=lowerCamelCase , pixel_values=lowerCamelCase , attention_mask=lowerCamelCase , output_attentions=lowerCamelCase )
UpperCAmelCase__ = output.vision_model_output.attentions
self.assertEqual(len(lowerCamelCase ) , vision_config.num_hidden_layers )
# in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token)
UpperCAmelCase__ = to_atuple(vision_model.config.image_size )
UpperCAmelCase__ = to_atuple(vision_model.config.patch_size )
UpperCAmelCase__ = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
UpperCAmelCase__ = num_patches + 1
self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, seq_len, seq_len) )
UpperCAmelCase__ = output.text_model_output.attentions
self.assertEqual(len(lowerCamelCase ) , text_config.num_hidden_layers )
self.assertEqual(
text_attentions[0].shape[-3:] , (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) , )
def UpperCAmelCase_ ( self :int , lowerCamelCase :Union[str, Any] , lowerCamelCase :str , lowerCamelCase :List[str] ) -> Dict:
pt_model.to(lowerCamelCase )
pt_model.eval()
# prepare inputs
UpperCAmelCase__ = inputs_dict
UpperCAmelCase__ = {k: torch.tensor(v.tolist() ) for k, v in flax_inputs.items()}
with torch.no_grad():
UpperCAmelCase__ = pt_model(**lowerCamelCase ).to_tuple()
UpperCAmelCase__ = fx_model(**lowerCamelCase ).to_tuple()
self.assertEqual(len(lowerCamelCase ) , len(lowerCamelCase ) , "Output lengths differ between Flax and PyTorch" )
for fx_output, pt_output in zip(fx_outputs[:4] , pt_outputs[:4] ):
self.assert_almost_equals(lowerCamelCase , pt_output.numpy() , 4e-2 )
# PT -> Flax
with tempfile.TemporaryDirectory() as tmpdirname:
pt_model.save_pretrained(lowerCamelCase )
UpperCAmelCase__ = FlaxVisionTextDualEncoderModel.from_pretrained(lowerCamelCase , from_pt=lowerCamelCase )
UpperCAmelCase__ = fx_model_loaded(**lowerCamelCase ).to_tuple()
self.assertEqual(len(lowerCamelCase ) , len(lowerCamelCase ) , "Output lengths differ between Flax and PyTorch" )
for fx_output_loaded, pt_output in zip(fx_outputs_loaded[:4] , pt_outputs[:4] ):
self.assert_almost_equals(lowerCamelCase , pt_output.numpy() , 4e-2 )
# Flax -> PT
with tempfile.TemporaryDirectory() as tmpdirname:
fx_model.save_pretrained(lowerCamelCase )
UpperCAmelCase__ = VisionTextDualEncoderModel.from_pretrained(lowerCamelCase , from_flax=lowerCamelCase )
pt_model_loaded.to(lowerCamelCase )
pt_model_loaded.eval()
with torch.no_grad():
UpperCAmelCase__ = pt_model_loaded(**lowerCamelCase ).to_tuple()
self.assertEqual(len(lowerCamelCase ) , len(lowerCamelCase ) , "Output lengths differ between Flax and PyTorch" )
for fx_output, pt_output_loaded in zip(fx_outputs[:4] , pt_outputs_loaded[:4] ):
self.assert_almost_equals(lowerCamelCase , pt_output_loaded.numpy() , 4e-2 )
def UpperCAmelCase_ ( self :int , lowerCamelCase :List[Any] , lowerCamelCase :List[str] , lowerCamelCase :List[str] ) -> Optional[int]:
UpperCAmelCase__ = VisionTextDualEncoderConfig.from_vision_text_configs(lowerCamelCase , lowerCamelCase )
UpperCAmelCase__ = VisionTextDualEncoderModel(lowerCamelCase )
UpperCAmelCase__ = FlaxVisionTextDualEncoderModel(lowerCamelCase )
UpperCAmelCase__ = convert_pytorch_state_dict_to_flax(pt_model.state_dict() , lowerCamelCase )
UpperCAmelCase__ = fx_state
self.check_pt_flax_equivalence(lowerCamelCase , lowerCamelCase , lowerCamelCase )
def UpperCAmelCase_ ( self :Tuple , lowerCamelCase :int , lowerCamelCase :Optional[Any] , lowerCamelCase :List[Any] ) -> str:
UpperCAmelCase__ = VisionTextDualEncoderConfig.from_vision_text_configs(lowerCamelCase , lowerCamelCase )
UpperCAmelCase__ = VisionTextDualEncoderModel(lowerCamelCase )
UpperCAmelCase__ = FlaxVisionTextDualEncoderModel(lowerCamelCase )
UpperCAmelCase__ = load_flax_weights_in_pytorch_model(lowerCamelCase , fx_model.params )
self.check_pt_flax_equivalence(lowerCamelCase , lowerCamelCase , lowerCamelCase )
    def test_model_from_pretrained_configs(self):
        inputs_dict = self.prepare_config_and_inputs()
        self.check_model_from_pretrained_configs(**inputs_dict)

    def test_vision_text_dual_encoder_from_pretrained(self):
        inputs_dict = self.prepare_config_and_inputs()
        self.check_vision_text_dual_encoder_from_pretrained(**inputs_dict)

    def test_save_load(self):
        inputs_dict = self.prepare_config_and_inputs()
        self.check_save_load(**inputs_dict)

    def test_vision_text_output_attention(self):
        inputs_dict = self.prepare_config_and_inputs()
        self.check_vision_text_output_attention(**inputs_dict)

    @is_pt_flax_cross_test
    def test_pt_flax_equivalence(self):
        config_inputs_dict = self.prepare_config_and_inputs()
        vision_config = config_inputs_dict.pop("vision_config")
        text_config = config_inputs_dict.pop("text_config")
        inputs_dict = config_inputs_dict

        self.check_equivalence_pt_to_flax(vision_config, text_config, inputs_dict)
        self.check_equivalence_flax_to_pt(vision_config, text_config, inputs_dict)
    @slow
    def test_real_model_save_load_from_pretrained(self):
        model_a, inputs = self.get_pretrained_model_and_inputs()

        outputs = model_a(**inputs)
        out_a = outputs[0]

        with tempfile.TemporaryDirectory() as tmp_dirname:
            model_a.save_pretrained(tmp_dirname)
            model_b = FlaxVisionTextDualEncoderModel.from_pretrained(tmp_dirname)

            after_outputs = model_b(**inputs)
            out_b = after_outputs[0]
            # compare the outputs of the original and the reloaded model, not a tensor with itself
            max_diff = np.amax(np.abs(out_b - out_a))
            self.assertLessEqual(max_diff, 1e-5)
@require_flax
class FlaxViTBertModelTest(lowerCAmelCase, unittest.TestCase):
    def get_pretrained_model_and_inputs(self):
        model = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(
            "hf-internal-testing/tiny-random-vit",
            "hf-internal-testing/tiny-bert",
            vision_from_pt=True,
            text_from_pt=True,
        )
        batch_size = 13
        pixel_values = floats_tensor(
            [
                batch_size,
                model.config.vision_config.num_channels,
                model.config.vision_config.image_size,
                model.config.vision_config.image_size,
            ]
        )
        input_ids = ids_tensor([batch_size, 4], model.config.text_config.vocab_size)
        attention_mask = random_attention_mask([batch_size, 4])
        inputs = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}

        return model, inputs

    def get_vision_text_model(self, vision_config, text_config):
        vision_model = FlaxViTModel(vision_config)
        text_model = FlaxBertModel(text_config)
        return vision_model, text_model

    def prepare_config_and_inputs(self):
        vit_model_tester = FlaxViTModelTester(self)
        bert_model_tester = FlaxBertModelTester(self)
        vision_config_and_inputs = vit_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()

        vision_config, pixel_values = vision_config_and_inputs
        text_config, input_ids, token_type_ids, attention_mask = text_config_and_inputs

        # make sure that cross attention layers are added
        return {
            "text_config": text_config,
            "vision_config": vision_config,
            "pixel_values": pixel_values,
            "attention_mask": attention_mask,
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
        }
@require_torch
class FlaxCLIPVisionBertModelTest(lowerCAmelCase, unittest.TestCase):
    def get_pretrained_model_and_inputs(self):
        model = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(
            "hf-internal-testing/tiny-random-clip",
            "hf-internal-testing/tiny-bert",
            vision_from_pt=True,
            text_from_pt=True,
        )
        batch_size = 13
        pixel_values = floats_tensor(
            [
                batch_size,
                model.config.vision_config.num_channels,
                model.config.vision_config.image_size,
                model.config.vision_config.image_size,
            ]
        )
        input_ids = ids_tensor([batch_size, 4], model.config.text_config.vocab_size)
        attention_mask = random_attention_mask([batch_size, 4])
        inputs = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}

        return model, inputs

    def get_vision_text_model(self, vision_config, text_config):
        vision_model = FlaxCLIPVisionModel(vision_config)
        text_model = FlaxBertModel(text_config)
        return vision_model, text_model

    def prepare_config_and_inputs(self):
        clip_model_tester = FlaxCLIPVisionModelTester(self)
        bert_model_tester = FlaxBertModelTester(self)
        vision_config_and_inputs = clip_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()

        vision_config, pixel_values = vision_config_and_inputs
        text_config, input_ids, token_type_ids, attention_mask = text_config_and_inputs

        # make sure that cross attention layers are added
        return {
            "text_config": text_config,
            "vision_config": vision_config,
            "pixel_values": pixel_values,
            "attention_mask": attention_mask,
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
        }
@require_flax
@require_vision
class VisionTextDualEncoderIntegrationTest(unittest.TestCase):
    @slow
    def test_inference(self):
        model = FlaxVisionTextDualEncoderModel.from_pretrained("clip-italian/clip-italian", logit_scale_init_value=1.0)
        processor = VisionTextDualEncoderProcessor.from_pretrained("clip-italian/clip-italian")

        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        inputs = processor(
            text=["una foto di un gatto", "una foto di un cane"], images=image, padding=True, return_tensors="np"
        )

        outputs = model(**inputs)

        # verify the logits
        self.assertEqual(outputs.logits_per_image.shape, (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]))
        self.assertEqual(
            outputs.logits_per_text.shape,
            (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]),
        )

        expected_logits = np.array([[1.2284727, 0.3104122]])

        self.assertTrue(np.allclose(outputs.logits_per_image, expected_logits, atol=1e-3))
from typing import List, Optional, Union
import numpy as np
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
logger = logging.get_logger(__name__)


class EncodecFeatureExtractor(SequenceFeatureExtractor):
    model_input_names = ["input_values", "padding_mask"]

    def __init__(
        self,
        feature_size: int = 1,
        sampling_rate: int = 24000,
        padding_value: float = 0.0,
        chunk_length_s: float = None,
        overlap: float = None,
        **kwargs,
    ):
        super().__init__(feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, **kwargs)
        self.chunk_length_s = chunk_length_s
        self.overlap = overlap

    @property
    def chunk_length(self) -> Optional[int]:
        if self.chunk_length_s is None:
            return None
        else:
            return int(self.chunk_length_s * self.sampling_rate)

    @property
    def chunk_stride(self) -> Optional[int]:
        if self.chunk_length_s is None or self.overlap is None:
            return None
        else:
            # the stride between chunks shrinks as the requested overlap grows
            return max(1, int((1.0 - self.overlap) * self.chunk_length))

    def __call__(
        self,
        raw_audio: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]],
        padding: Optional[Union[bool, str, PaddingStrategy]] = None,
        truncation: Optional[bool] = False,
        max_length: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        sampling_rate: Optional[int] = None,
    ) -> BatchFeature:
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"The model corresponding to this feature extractor: {self} was trained using a sampling rate of"
                    f" {self.sampling_rate}. Please make sure that the provided audio input was sampled with"
                    f" {self.sampling_rate} and not {sampling_rate}."
                )
        else:
            logger.warning(
                "It is strongly recommended to pass the `sampling_rate` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug."
            )

        if padding and truncation:
            raise ValueError("Both padding and truncation were set. Make sure you only set one.")
        elif padding is None:
            # by default let's pad the inputs
            padding = True

        is_batched = bool(
            isinstance(raw_audio, (list, tuple)) and (isinstance(raw_audio[0], (np.ndarray, tuple, list)))
        )

        if is_batched:
            raw_audio = [np.asarray(audio, dtype=np.float32).T for audio in raw_audio]
        elif not is_batched and not isinstance(raw_audio, np.ndarray):
            raw_audio = np.asarray(raw_audio, dtype=np.float32)
        elif isinstance(raw_audio, np.ndarray) and raw_audio.dtype is np.dtype(np.float64):
            raw_audio = raw_audio.astype(np.float32)

        # always return batch
        if not is_batched:
            raw_audio = [np.asarray(raw_audio).T]

        # verify inputs are valid
        for idx, example in enumerate(raw_audio):
            if example.ndim > 2:
                raise ValueError(f"Expected input shape (channels, length) but got shape {example.shape}")
            if self.feature_size == 1 and example.ndim != 1:
                raise ValueError(f"Expected mono audio but example has {example.shape[-1]} channels")
            if self.feature_size == 2 and example.shape[-1] != 2:
                raise ValueError(f"Expected stereo audio but example has {example.shape[-1]} channels")

        padded_inputs = None
        input_values = BatchFeature({"input_values": raw_audio})
        if self.chunk_stride is not None and self.chunk_length is not None and max_length is None:
            if truncation:
                # truncate to the largest whole number of chunks that fits the shortest example
                max_length = min(array.shape[0] for array in raw_audio)
                nb_step = int(np.floor(max_length / self.chunk_stride))
                max_length = (nb_step - 1) * self.chunk_stride + self.chunk_length
            elif padding:
                # pad to the smallest whole number of chunks that covers the longest example
                max_length = max(array.shape[0] for array in raw_audio)
                nb_step = int(np.ceil(max_length / self.chunk_stride))
                max_length = (nb_step - 1) * self.chunk_stride + self.chunk_length
                padding = "max_length"
            else:
                padded_inputs = input_values

        # normal padding on batch
        if padded_inputs is None:
            padded_inputs = self.pad(
                input_values,
                max_length=max_length,
                truncation=truncation,
                padding=padding,
                return_attention_mask=padding,
            )
            if padding:
                padded_inputs["padding_mask"] = padded_inputs.pop("attention_mask")

        input_values = []
        for example in padded_inputs.pop("input_values"):
            if self.feature_size == 1:
                example = example[..., None]
            input_values.append(example.T)

        padded_inputs["input_values"] = input_values
        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors)

        return padded_inputs
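# Illustrative sketch of the chunking arithmetic above (not part of the original
# module; the constructor arguments are assumptions chosen for the example):
# with sampling_rate=24000, chunk_length_s=1.0 and overlap=0.25, chunk_length is
# 24000 samples and chunk_stride is max(1, int(0.75 * 24000)) = 18000, so padding a
# 30000-sample batch computes nb_step = ceil(30000 / 18000) = 2 and pads every
# example to (2 - 1) * 18000 + 24000 = 42000 samples.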
import torch
from torch import nn
from transformers import CLIPPreTrainedModel, CLIPVisionModel
from ...models.attention import BasicTransformerBlock
from ...utils import logging
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name


class PaintByExampleImageEncoder(CLIPPreTrainedModel):
    def __init__(self, config, proj_size=768):
        super().__init__(config)
        self.proj_size = proj_size

        self.model = CLIPVisionModel(config)
        self.mapper = PaintByExampleMapper(config)
        self.final_layer_norm = nn.LayerNorm(config.hidden_size)
        self.proj_out = nn.Linear(config.hidden_size, self.proj_size)

        # uncondition for scaling
        self.uncond_vector = nn.Parameter(torch.randn((1, 1, self.proj_size)))

    def forward(self, pixel_values, return_uncond_vector=False):
        clip_output = self.model(pixel_values=pixel_values)
        latent_states = clip_output.pooler_output
        latent_states = self.mapper(latent_states[:, None])
        latent_states = self.final_layer_norm(latent_states)
        latent_states = self.proj_out(latent_states)
        if return_uncond_vector:
            return latent_states, self.uncond_vector

        return latent_states


class PaintByExampleMapper(nn.Module):
    def __init__(self, config):
        super().__init__()
        num_layers = (config.num_hidden_layers + 1) // 5
        hid_size = config.hidden_size
        num_heads = 1
        self.blocks = nn.ModuleList(
            [
                BasicTransformerBlock(hid_size, num_heads, hid_size // num_heads, activation_fn="gelu", attention_bias=True)
                for _ in range(num_layers)
            ]
        )

    def forward(self, hidden_states):
        for block in self.blocks:
            hidden_states = block(hidden_states)

        return hidden_states
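# Note (editorial, hedged): with `(config.num_hidden_layers + 1) // 5`, a CLIP
# ViT-L/14 vision config (24 hidden layers) yields (24 + 1) // 5 = 5 mapper
# blocks, each a single-head BasicTransformerBlock over the pooled CLIP embedding.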
from __future__ import annotations
import math
def is_prime(number: int) -> bool:
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers and all multiples of 3 are not primes
        return False

    # All primes greater than 3 are of the form 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def list_truncated_nums(n: int) -> list[int]:
    str_num = str(n)
    list_nums = [n]
    for i in range(1, len(str_num)):
        list_nums.append(int(str_num[i:]))
        list_nums.append(int(str_num[:-i]))
    return list_nums


def validate(n: int) -> bool:
    if len(str(n)) > 3:
        if not is_prime(int(str(n)[-3:])) or not is_prime(int(str(n)[:3])):
            return False
    return True


def compute_truncated_primes(count: int = 11) -> list[int]:
    list_truncated_primes: list[int] = []
    num = 13
    while len(list_truncated_primes) != count:
        if validate(num):
            list_nums = list_truncated_nums(num)
            if all(is_prime(i) for i in list_nums):
                list_truncated_primes.append(num)
        num += 2
    return list_truncated_primes


def solution() -> int:
    return sum(compute_truncated_primes(11))
if __name__ == "__main__":
print(F"""{sum(compute_truncated_primes(1_1)) = }""")
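# For reference (hedged, the well-known Project Euler 37 result): there are exactly
# eleven primes that are truncatable from both sides, the largest being 739397, and
# their sum -- the value printed above -- is 748317.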
'''simple docstring'''
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate
# and perform gradient accumulation
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=batch_size
    )

    return train_dataloader, eval_dataloader
# For testing only
if os.environ.get("""TESTING_MOCKED_DATALOADERS""", None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
    get_dataloaders = mocked_dataloaders  # noqa: F811
def training_function(config, args):
    # For testing only
    if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
        config["num_epochs"] = 2
    # New Code #
    gradient_accumulation_steps = int(args.gradient_accumulation_steps)
    # Initialize accelerator
    accelerator = Accelerator(
        cpu=args.cpu, mixed_precision=args.mixed_precision, gradient_accumulation_steps=gradient_accumulation_steps
    )
    if accelerator.distributed_type == DistributedType.TPU and gradient_accumulation_steps > 1:
        raise NotImplementedError(
            "Gradient accumulation on TPUs is currently not supported. Pass `gradient_accumulation_steps=1`"
        )
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    metric = evaluate.load("glue", "mrpc")

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)
    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)

    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr)

    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer,
        num_warmup_steps=100,
        num_training_steps=(len(train_dataloader) * num_epochs),
    )

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # Now we train the model
    for epoch in range(num_epochs):
        model.train()
        for step, batch in enumerate(train_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            # New code #
            # We use the new `accumulate` context manager to perform gradient accumulation
            # We also currently do not support TPUs nor advise it as bugs were found on the XLA side when running our tests.
            with accelerator.accumulate(model):
                output = model(**batch)
                loss = output.loss
                accelerator.backward(loss)
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

        model.eval()
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
            metric.add_batch(
                predictions=predictions,
                references=references,
            )

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    # New Code #
    parser.add_argument(
        "--gradient_accumulation_steps",
        type=int,
        default=1,
        help="The number of minibatches to be ran before gradients are accumulated.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
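# Illustrative launch commands (the script's file name below is an assumption):
#
#   python gradient_accumulation.py --gradient_accumulation_steps 2
#   accelerate launch gradient_accumulation.py --mixed_precision fp16 --gradient_accumulation_steps 4
#
# With accumulation, `accelerator.accumulate(model)` skips the optimizer step and
# gradient synchronization until `gradient_accumulation_steps` batches have passed.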
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_rembert import RemBertTokenizer
else:
    RemBertTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "google/rembert": "https://huggingface.co/google/rembert/resolve/main/sentencepiece.model",
    },
    "tokenizer_file": {
        "google/rembert": "https://huggingface.co/google/rembert/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/rembert": 256,
}

SPIECE_UNDERLINE = "▁"
class RemBertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = RemBertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        remove_space=True,
        keep_accents=False,
        bos_token="[CLS]",
        eos_token="[SEP]",
        unk_token="<unk>",
        sep_token="[SEP]",
        pad_token="<pad>",
        cls_token="[CLS]",
        mask_token="[MASK]",
        **kwargs,
    ):
        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            **kwargs,
        )

        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    "You should not supply a second sequence if the provided sequence of "
                    "ids is already formatted with special tokens for the model."
                )
            return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_0]

        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not os.path.isdir(save_directory):
            logger.error("Vocabulary path ({}) should be a directory".format(save_directory))
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
'''simple docstring'''
__all__ = [
"""Audio""",
"""Array2D""",
"""Array3D""",
"""Array4D""",
"""Array5D""",
"""ClassLabel""",
"""Features""",
"""Sequence""",
"""Value""",
"""Image""",
"""Translation""",
"""TranslationVariableLanguages""",
]
from .audio import Audio
from .features import Array2D, Array3D, Array4D, Array5D, ClassLabel, Features, Sequence, Value
from .image import Image
from .translation import Translation, TranslationVariableLanguages
'''simple docstring'''
import argparse
import requests
import torch
from PIL import Image
from transformers import CLIPProcessor, GroupViTConfig, GroupViTModel
def rename_key(name):
    # vision encoder
    if "img_encoder.pos_embed" in name:
        name = name.replace("img_encoder.pos_embed", "vision_model.embeddings.position_embeddings")
    if "img_encoder.patch_embed.proj" in name:
        name = name.replace("img_encoder.patch_embed.proj", "vision_model.embeddings.patch_embeddings.projection")
    if "img_encoder.patch_embed.norm" in name:
        name = name.replace("img_encoder.patch_embed.norm", "vision_model.embeddings.layernorm")
    if "img_encoder.layers" in name:
        name = name.replace("img_encoder.layers", "vision_model.encoder.stages")
    if "blocks" in name and "res" not in name:
        name = name.replace("blocks", "layers")
    if "attn" in name and "pre_assign" not in name:
        name = name.replace("attn", "self_attn")
    if "proj" in name and "self_attn" in name and "text" not in name:
        name = name.replace("proj", "out_proj")
    if "pre_assign_attn.attn.proj" in name:
        name = name.replace("pre_assign_attn.attn.proj", "pre_assign_attn.attn.out_proj")
    if "norm1" in name:
        name = name.replace("norm1", "layer_norm1")
    if "norm2" in name and "pre_assign" not in name:
        name = name.replace("norm2", "layer_norm2")
    if "img_encoder.norm" in name:
        name = name.replace("img_encoder.norm", "vision_model.layernorm")
    # text encoder
    if "text_encoder.token_embedding" in name:
        name = name.replace("text_encoder.token_embedding", "text_model.embeddings.token_embedding")
    if "text_encoder.positional_embedding" in name:
        name = name.replace("text_encoder.positional_embedding", "text_model.embeddings.position_embedding.weight")
    if "text_encoder.transformer.resblocks." in name:
        name = name.replace("text_encoder.transformer.resblocks.", "text_model.encoder.layers.")
    if "ln_1" in name:
        name = name.replace("ln_1", "layer_norm1")
    if "ln_2" in name:
        name = name.replace("ln_2", "layer_norm2")
    if "c_fc" in name:
        name = name.replace("c_fc", "fc1")
    if "c_proj" in name:
        name = name.replace("c_proj", "fc2")
    if "text_encoder" in name:
        name = name.replace("text_encoder", "text_model")
    if "ln_final" in name:
        name = name.replace("ln_final", "final_layer_norm")
    # projection layers
    if "img_projector.linear_hidden." in name:
        name = name.replace("img_projector.linear_hidden.", "visual_projection.")
    if "img_projector.linear_out." in name:
        name = name.replace("img_projector.linear_out.", "visual_projection.3.")
    if "text_projector.linear_hidden" in name:
        name = name.replace("text_projector.linear_hidden", "text_projection")
    if "text_projector.linear_out" in name:
        name = name.replace("text_projector.linear_out", "text_projection.3")

    return name
def convert_state_dict(orig_state_dict, config):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "qkv" in key:
            # weights and biases of the key, value and query projections of vision encoder's attention layers require
            # special treatment: we need to split them up into separate matrices/vectors.
            # The target key templates below are reconstructed to match the renamed layout produced by `rename_key`.
            key_split = key.split(".")
            stage_num, layer_num = int(key_split[2]), int(key_split[4])
            dim = config.vision_config.hidden_size
            prefix = f"vision_model.encoder.stages.{stage_num}.layers.{layer_num}.self_attn"
            if "weight" in key:
                orig_state_dict[f"{prefix}.q_proj.weight"] = val[:dim, :]
                orig_state_dict[f"{prefix}.k_proj.weight"] = val[dim : dim * 2, :]
                orig_state_dict[f"{prefix}.v_proj.weight"] = val[-dim:, :]
            else:
                orig_state_dict[f"{prefix}.q_proj.bias"] = val[:dim]
                orig_state_dict[f"{prefix}.k_proj.bias"] = val[dim : dim * 2]
                orig_state_dict[f"{prefix}.v_proj.bias"] = val[-dim:]
        elif "in_proj" in key:
            # weights and biases of the key, value and query projections of text encoder's attention layers require
            # special treatment: we need to split them up into separate matrices/vectors
            key_split = key.split(".")
            layer_num = int(key_split[3])
            dim = config.text_config.hidden_size
            prefix = f"text_model.encoder.layers.{layer_num}.self_attn"
            if "weight" in key:
                orig_state_dict[f"{prefix}.q_proj.weight"] = val[:dim, :]
                orig_state_dict[f"{prefix}.k_proj.weight"] = val[dim : dim * 2, :]
                orig_state_dict[f"{prefix}.v_proj.weight"] = val[-dim:, :]
            else:
                orig_state_dict[f"{prefix}.q_proj.bias"] = val[:dim]
                orig_state_dict[f"{prefix}.k_proj.bias"] = val[dim : dim * 2]
                orig_state_dict[f"{prefix}.v_proj.bias"] = val[-dim:]
        else:
            new_name = rename_key(key)
            # squeeze if necessary
            if (
                "text_projection.0" in new_name
                or "text_projection.3" in new_name
                or "visual_projection.0" in new_name
                or "visual_projection.3" in new_name
            ):
                orig_state_dict[new_name] = val.squeeze_()
            else:
                orig_state_dict[new_name] = val

    return orig_state_dict
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_groupvit_checkpoint(checkpoint_path, pytorch_dump_folder_path, model_name="groupvit-gcc-yfcc", push_to_hub=False):
    config = GroupViTConfig()
    model = GroupViTModel(config).eval()

    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]
    new_state_dict = convert_state_dict(state_dict, config)
    missing_keys, unexpected_keys = model.load_state_dict(new_state_dict, strict=False)
    assert missing_keys == ["text_model.embeddings.position_ids"]
    assert (unexpected_keys == ["multi_label_logit_scale"]) or (len(unexpected_keys) == 0)

    # verify result
    processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")
    image = prepare_img()
    inputs = processor(text=["a photo of a cat", "a photo of a dog"], images=image, padding=True, return_tensors="pt")

    with torch.no_grad():
        outputs = model(**inputs)

    if model_name == "groupvit-gcc-yfcc":
        expected_logits = torch.tensor([[13.3523, 6.3629]])
    elif model_name == "groupvit-gcc-redcaps":
        expected_logits = torch.tensor([[16.1873, 8.6230]])
    else:
        raise ValueError(f"Model name {model_name} not supported.")
    assert torch.allclose(outputs.logits_per_image, expected_logits, atol=1e-3)

    processor.save_pretrained(pytorch_dump_folder_path)
    model.save_pretrained(pytorch_dump_folder_path)
    print("Successfully saved processor and model to", pytorch_dump_folder_path)

    if push_to_hub:
        print("Pushing to the hub...")
        processor.push_to_hub(model_name, organization="nielsr")
        model.push_to_hub(model_name, organization="nielsr")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to dump the processor and PyTorch model."""
)
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to GroupViT checkpoint""")
parser.add_argument(
"""--model_name""",
        default="groupvit-gcc-yfcc",
type=str,
help="""Name of the model. Expecting either 'groupvit-gcc-yfcc' or 'groupvit-gcc-redcaps'""",
)
parser.add_argument(
"""--push_to_hub""",
action="""store_true""",
help="""Whether or not to push the converted model and processor to the 🤗 hub using the provided `model_name`.""",
)
    args = parser.parse_args()
convert_groupvit_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
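# Illustrative invocation (script and checkpoint file names are assumptions):
#
#   python convert_groupvit_nvlab_to_hf.py \
#       --checkpoint_path /path/to/groupvit_checkpoint.pth \
#       --pytorch_dump_folder_path ./groupvit-gcc-yfcc \
#       --model_name groupvit-gcc-yfcc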
'''simple docstring'''
from math import isqrt
def is_prime(number: int) -> bool:
    return all(number % divisor != 0 for divisor in range(2, isqrt(number) + 1))


def solution(max_prime: int = 10**6) -> int:
    primes_count = 0
    cube_index = 1
    prime_candidate = 7
    while prime_candidate < max_prime:
        primes_count += is_prime(prime_candidate)
        cube_index += 1
        prime_candidate += 6 * cube_index
    return primes_count
if __name__ == "__main__":
print(f'''{solution() = }''')
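# Why the loop works (editorial note): consecutive cube differences satisfy
# (n + 1)**3 - n**3 = 3*n**2 + 3*n + 1, and the gap between successive differences
# is 6*(n + 1); starting from 7 = 2**3 - 1**3 and adding 6 * cube_index after each
# increment therefore enumerates every candidate without computing any cubes.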
'''simple docstring'''
from typing import Union
import fire
import torch
from tqdm import tqdm
def convert(src_path: str, map_location: str = "cpu", save_path: Union[str, None] = None) -> None:
    state_dict = torch.load(src_path, map_location=map_location)
    for k, v in tqdm(state_dict.items()):
        if not isinstance(v, torch.Tensor):
            raise TypeError("FP16 conversion only works on paths that are saved state dicts, like pytorch_model.bin")
        state_dict[k] = v.half()
    if save_path is None:  # overwrite src_path
        save_path = src_path
    torch.save(state_dict, save_path)
if __name__ == "__main__":
fire.Fire(convert)
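# Illustrative CLI usage via fire (file names are assumptions):
#
#   python convert_model_to_fp16.py pytorch_model.bin --save_path pytorch_model.fp16.bin
#
# Halving every tensor roughly halves the checkpoint size on disk.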
ERROR_MSG = "Input must be a string of 8 numbers plus letter"
LOOKUP_LETTERS = "TRWAGMYFPDXBNJZSQVHLCKE"


def is_spain_national_id(spanish_id: str) -> bool:
    if not isinstance(spanish_id, str):
        msg = f"Expected string as input, found {type(spanish_id).__name__}"
        raise TypeError(msg)

    spanish_id_clean = spanish_id.replace("-", "").upper()
    if len(spanish_id_clean) != 9:
        raise ValueError(ERROR_MSG)

    try:
        number = int(spanish_id_clean[0:8])
        letter = spanish_id_clean[8]
    except ValueError as ex:
        raise ValueError(ERROR_MSG) from ex

    if letter.isdigit():
        raise ValueError(ERROR_MSG)

    return letter == LOOKUP_LETTERS[number % 23]
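# Worked example of the mod-23 checksum: for "12345678Z", 12345678 % 23 == 14 and
# LOOKUP_LETTERS[14] == "Z", so the ID validates.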
if __name__ == "__main__":
import doctest
doctest.testmod()
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {"configuration_xglm": ["XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "XGLMConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_xglm"] = ["XGLMTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_xglm_fast"] = ["XGLMTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_xglm"] = [
"""XGLM_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""XGLMForCausalLM""",
"""XGLMModel""",
"""XGLMPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_xglm"] = [
"""FlaxXGLMForCausalLM""",
"""FlaxXGLMModel""",
"""FlaxXGLMPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_xglm"] = [
"""TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFXGLMForCausalLM""",
"""TFXGLMModel""",
"""TFXGLMPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_xglm import XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XGLMConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm import XGLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm_fast import XGLMTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xglm import XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, XGLMForCausalLM, XGLMModel, XGLMPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_xglm import FlaxXGLMForCausalLM, FlaxXGLMModel, FlaxXGLMPreTrainedModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
TFXGLMPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
"""simple docstring"""
import re
from filelock import FileLock
try:
    import nltk

    NLTK_AVAILABLE = True
except (ImportError, ModuleNotFoundError):
    NLTK_AVAILABLE = False

if NLTK_AVAILABLE:
    with FileLock(".lock") as lock:
        nltk.download("punkt", quiet=True)


def add_newline_to_end_of_each_sentence(x: str) -> str:
    """This violates the normal sent tokenization, but makes pegasus happy."""
    x = re.sub("<n>", "", x)  # remove pegasus newline char
    assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
    return "\n".join(nltk.sent_tokenize(x))
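# Illustrative behaviour (hedged): passing "Hello world. <n> How are you?" first
# strips the "<n>" marker, then re-joins the NLTK sentence split with newlines,
# giving "Hello world.\nHow are you?".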
"""simple docstring"""
import string
import numpy
def greatest_common_divisor(a: int, b: int) -> int:
    return b if a == 0 else greatest_common_divisor(b % a, a)
class HillCipher:
    key_string = string.ascii_uppercase + string.digits
    # This cipher takes alphanumerics into account
    # i.e. a total of 36 characters

    # take x and return x % len(key_string)
    modulus = numpy.vectorize(lambda x: x % 36)

    to_int = numpy.vectorize(round)

    def __init__(self, encrypt_key):
        self.encrypt_key = self.modulus(encrypt_key)  # mod36 calc's on the encrypt key
        self.check_determinant()  # validate the determinant of the encryption key
        self.break_key = encrypt_key.shape[0]

    def replace_letters(self, letter):
        return self.key_string.index(letter)

    def replace_digits(self, num):
        return self.key_string[round(num)]

    def check_determinant(self):
        det = round(numpy.linalg.det(self.encrypt_key))

        if det < 0:
            det = det % len(self.key_string)

        req_l = len(self.key_string)
        if greatest_common_divisor(det, len(self.key_string)) != 1:
            msg = (
                f"determinant modular {req_l} of encryption key({det}) "
                f"is not co prime w.r.t {req_l}.\nTry another key."
            )
            raise ValueError(msg)

    def process_text(self, text):
        chars = [char for char in text.upper() if char in self.key_string]

        last = chars[-1]
        while len(chars) % self.break_key != 0:
            chars.append(last)

        return "".join(chars)

    def encrypt(self, text):
        text = self.process_text(text.upper())
        encrypted = ""

        for i in range(0, len(text) - self.break_key + 1, self.break_key):
            batch = text[i : i + self.break_key]
            vec = [self.replace_letters(char) for char in batch]
            batch_vec = numpy.array([vec]).T
            batch_encrypted = self.modulus(self.encrypt_key.dot(batch_vec)).T.tolist()[0]
            encrypted_batch = "".join(self.replace_digits(num) for num in batch_encrypted)
            encrypted += encrypted_batch

        return encrypted

    def make_decrypt_key(self):
        det = round(numpy.linalg.det(self.encrypt_key))

        if det < 0:
            det = det % len(self.key_string)
        det_inv = None
        for i in range(len(self.key_string)):
            if (det * i) % len(self.key_string) == 1:
                det_inv = i
                break

        inv_key = det_inv * numpy.linalg.det(self.encrypt_key) * numpy.linalg.inv(self.encrypt_key)

        return self.to_int(self.modulus(inv_key))

    def decrypt(self, text):
        decrypt_key = self.make_decrypt_key()
        text = self.process_text(text.upper())
        decrypted = ""

        for i in range(0, len(text) - self.break_key + 1, self.break_key):
            batch = text[i : i + self.break_key]
            vec = [self.replace_letters(char) for char in batch]
            batch_vec = numpy.array([vec]).T
            batch_decrypted = self.modulus(decrypt_key.dot(batch_vec)).T.tolist()[0]
            decrypted_batch = "".join(self.replace_digits(num) for num in batch_decrypted)
            decrypted += decrypted_batch

        return decrypted
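# Editorial note on make_decrypt_key: det_inv is the modular inverse of the key's
# determinant mod 36, and det * inv(key) equals the adjugate matrix, so
# det_inv * det * inv(key) reproduces the inverse of the key matrix modulo 36
# using only integer-valued entries after rounding.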
def main() -> None:
__UpperCamelCase = int(input('Enter the order of the encryption key: ' ) )
__UpperCamelCase = []
print('Enter each row of the encryption key with space separated integers' )
for _ in range(snake_case ):
__UpperCamelCase = [int(snake_case ) for x in input().split()]
hill_matrix.append(snake_case )
__UpperCamelCase = HillCipher(numpy.array(snake_case ) )
print('Would you like to encrypt or decrypt some text? (1 or 2)' )
__UpperCamelCase = input('\n1. Encrypt\n2. Decrypt\n' )
if option == "1":
__UpperCamelCase = input('What text would you like to encrypt?: ' )
print('Your encrypted text is:' )
print(hc.encrypt(snake_case ) )
elif option == "2":
__UpperCamelCase = input('What text would you like to decrypt?: ' )
print('Your decrypted text is:' )
print(hc.decrypt(snake_case ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
"""simple docstring"""
from functools import lru_cache
@lru_cache
def factorial(num: int) -> int:
    if num < 0:
        raise ValueError("Number should not be negative.")
    return 1 if num in (0, 1) else num * factorial(num - 1)
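# Note: @lru_cache memoizes every intermediate value, so repeated calls such as
# factorial(498) after factorial(500) are O(1) lookups; very deep first calls are
# still bounded by Python's recursion limit (1000 frames by default).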
if __name__ == "__main__":
import doctest
doctest.testmod()
"""simple docstring"""
import math
def solution(n: int = 100) -> int:
    sum_of_squares = sum(i * i for i in range(1, n + 1))
    square_of_sum = int(math.pow(sum(range(1, n + 1)), 2))
    return square_of_sum - sum_of_squares
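# Closed-form cross-check: sum(1..n) = n * (n + 1) / 2 and the sum of squares is
# n * (n + 1) * (2 * n + 1) / 6, so for n = 100 the difference is
# 5050**2 - 338350 = 25164150.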
if __name__ == "__main__":
print(F'{solution() = }')
'''simple docstring'''
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from accelerate import PartialState
from accelerate.utils.operations import broadcast, gather, gather_object, pad_across_processes, reduce
def create_tensor(state):
    return (torch.arange(state.num_processes) + 1.0 + (state.num_processes * state.process_index)).to(state.device)


def test_gather(state):
    tensor = create_tensor(state)
    gathered_tensor = gather(tensor)
    assert gathered_tensor.tolist() == list(range(1, state.num_processes**2 + 1))


def test_gather_object(state):
    obj = [state.process_index]
    gathered_obj = gather_object(obj)
    assert len(gathered_obj) == state.num_processes, f"{gathered_obj}, {len(gathered_obj)} != {state.num_processes}"
    assert gathered_obj == list(range(state.num_processes)), f"{gathered_obj} != {list(range(state.num_processes))}"


def test_broadcast(state):
    tensor = create_tensor(state)
    broadcasted_tensor = broadcast(tensor)
    assert broadcasted_tensor.shape == torch.Size([state.num_processes])
    assert broadcasted_tensor.tolist() == list(range(1, state.num_processes + 1))


def test_pad_across_processes(state):
    # We need to pad the tensor with one more element if we are the main process
    # to ensure that we can pad
    if state.is_main_process:
        tensor = torch.arange(state.num_processes + 1).to(state.device)
    else:
        tensor = torch.arange(state.num_processes).to(state.device)
    padded_tensor = pad_across_processes(tensor)
    assert padded_tensor.shape == torch.Size([state.num_processes + 1])
    if not state.is_main_process:
        assert padded_tensor.tolist() == list(range(0, state.num_processes)) + [0]


def test_reduce_sum(state):
    # For now runs on only two processes
    if state.num_processes != 2:
        return
    tensor = create_tensor(state)
    reduced_tensor = reduce(tensor, "sum")
    truth_tensor = torch.tensor([4.0, 6]).to(state.device)
    assert torch.allclose(reduced_tensor, truth_tensor), f"{reduced_tensor} != {truth_tensor}"


def test_reduce_mean(state):
    # For now runs on only two processes
    if state.num_processes != 2:
        return
    tensor = create_tensor(state)
    reduced_tensor = reduce(tensor, "mean")
    truth_tensor = torch.tensor([2.0, 3]).to(state.device)
    assert torch.allclose(reduced_tensor, truth_tensor), f"{reduced_tensor} != {truth_tensor}"


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()


def main():
    state = PartialState()
    state.print(f"State: {state}")

    state.print("testing gather")
    test_gather(state)
    state.print("testing gather_object")
    test_gather_object(state)
    state.print("testing broadcast")
    test_broadcast(state)
    state.print("testing pad_across_processes")
    test_pad_across_processes(state)
    state.print("testing reduce_sum")
    test_reduce_sum(state)
    state.print("testing reduce_mean")
    test_reduce_mean(state)
if __name__ == "__main__":
main()
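# Illustrative launch (hedged; the script name is an assumption):
#
#   accelerate launch --num_processes 2 test_operations.py
#
# Each process runs every test; the reduce tests exercise cross-process sums and
# means and only run when exactly two processes are present.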
'''simple docstring'''
import os
import unittest
from transformers import LayoutLMTokenizer, LayoutLMTokenizerFast
from transformers.models.layoutlm.tokenization_layoutlm import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class LayoutLMTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = LayoutLMTokenizer
    rust_tokenizer_class = LayoutLMTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True

    def setUp(self):
        super().setUp()

        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

    def get_tokenizer(self, **kwargs):
        return LayoutLMTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "UNwant\u00E9d,running"
        output_text = "unwanted, running"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file)

        tokens = tokenizer.tokenize("UNwant\u00E9d,running")
        self.assertListEqual(tokens, ["un", "##want", "##ed", ",", "runn", "##ing"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [7, 4, 5, 10, 8, 9])

    def test_special_tokens_as_you_expect(self):
        """If you are training a seq2seq model that expects a decoder_prefix token make sure it is prepended to decoder_input_ids"""
        pass
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'configuration_mobilebert': [
'MOBILEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP',
'MobileBertConfig',
'MobileBertOnnxConfig',
],
'tokenization_mobilebert': ['MobileBertTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_mobilebert_fast"] = ["MobileBertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mobilebert"] = [
'MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'MobileBertForMaskedLM',
'MobileBertForMultipleChoice',
'MobileBertForNextSentencePrediction',
'MobileBertForPreTraining',
'MobileBertForQuestionAnswering',
'MobileBertForSequenceClassification',
'MobileBertForTokenClassification',
'MobileBertLayer',
'MobileBertModel',
'MobileBertPreTrainedModel',
'load_tf_weights_in_mobilebert',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_mobilebert"] = [
'TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFMobileBertForMaskedLM',
'TFMobileBertForMultipleChoice',
'TFMobileBertForNextSentencePrediction',
'TFMobileBertForPreTraining',
'TFMobileBertForQuestionAnswering',
'TFMobileBertForSequenceClassification',
'TFMobileBertForTokenClassification',
'TFMobileBertMainLayer',
'TFMobileBertModel',
'TFMobileBertPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_mobilebert import (
MOBILEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
MobileBertConfig,
MobileBertOnnxConfig,
)
from .tokenization_mobilebert import MobileBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mobilebert_fast import MobileBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mobilebert import (
MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
MobileBertLayer,
MobileBertModel,
MobileBertPreTrainedModel,
load_tf_weights_in_mobilebert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mobilebert import (
TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFMobileBertForMaskedLM,
TFMobileBertForMultipleChoice,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertMainLayer,
TFMobileBertModel,
TFMobileBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
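# Editorial note: the `_LazyModule` registered above defers the heavy torch/TF
# imports until an attribute such as `MobileBertModel` is first accessed, keeping
# `import transformers` cheap while `TYPE_CHECKING` still sees the real symbols.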
import logging
import os
from logging import (
CRITICAL, # NOQA
DEBUG, # NOQA
ERROR, # NOQA
FATAL, # NOQA
INFO, # NOQA
NOTSET, # NOQA
WARN, # NOQA
WARNING, # NOQA
)
from typing import Optional
from tqdm import auto as tqdm_lib
log_levels = {
    "debug": logging.DEBUG,
    "info": logging.INFO,
    "warning": logging.WARNING,
    "error": logging.ERROR,
    "critical": logging.CRITICAL,
}

_default_log_level = logging.WARNING


def _get_default_logging_level():
    """
    If the DATASETS_VERBOSITY env var is set to one of the valid choices, return that as the new default level;
    otherwise fall back to `_default_log_level`.
    """
    env_level_str = os.getenv("DATASETS_VERBOSITY", None)
    if env_level_str:
        if env_level_str in log_levels:
            return log_levels[env_level_str]
        else:
            logging.getLogger().warning(
                f"Unknown option DATASETS_VERBOSITY={env_level_str}, "
                f"has to be one of: { ', '.join(log_levels.keys()) }"
            )
    return _default_log_level


def _get_library_name() -> str:
    return __name__.split(".")[0]


def _get_library_root_logger() -> logging.Logger:
    return logging.getLogger(_get_library_name())


def _configure_library_root_logger() -> None:
    library_root_logger = _get_library_root_logger()
    library_root_logger.setLevel(_get_default_logging_level())


def _reset_library_root_logger() -> None:
    library_root_logger = _get_library_root_logger()
    library_root_logger.setLevel(logging.NOTSET)


def get_logger(name: Optional[str] = None) -> logging.Logger:
    """Return a logger with the specified name."""
    if name is None:
        name = _get_library_name()
    return logging.getLogger(name)


def get_verbosity() -> int:
    return _get_library_root_logger().getEffectiveLevel()


def set_verbosity(verbosity: int) -> None:
    _get_library_root_logger().setLevel(verbosity)


def set_verbosity_info():
    return set_verbosity(INFO)


def set_verbosity_warning():
    return set_verbosity(WARNING)


def set_verbosity_debug():
    return set_verbosity(DEBUG)


def set_verbosity_error():
    return set_verbosity(ERROR)


def disable_propagation() -> None:
    """Disable propagation of the library log outputs."""
    _get_library_root_logger().propagate = False


def enable_propagation() -> None:
    """Enable propagation of the library log outputs."""
    _get_library_root_logger().propagate = True
# Configure the library root logger at the module level (singleton-like)
_configure_library_root_logger()
class EmptyTqdm:
    """Dummy tqdm which doesn't do anything."""

    def __init__(self, *args, **kwargs):  # pylint: disable=unused-argument
        self._iterator = args[0] if args else None

    def __iter__(self):
        return iter(self._iterator)

    def __getattr__(self, _):
        """Return empty function."""

        def empty_fn(*args, **kwargs):  # pylint: disable=unused-argument
            return

        return empty_fn

    def __enter__(self):
        return self

    def __exit__(self, type_, value, traceback):
        return


_tqdm_active = True


class _tqdm_cls:
    def __call__(self, *args, disable=False, **kwargs):
        if _tqdm_active and not disable:
            return tqdm_lib.tqdm(*args, **kwargs)
        else:
            return EmptyTqdm(*args, **kwargs)

    def set_lock(self, *args, **kwargs):
        self._lock = None
        if _tqdm_active:
            return tqdm_lib.tqdm.set_lock(*args, **kwargs)

    def get_lock(self):
        if _tqdm_active:
            return tqdm_lib.tqdm.get_lock()


tqdm = _tqdm_cls()


def is_progress_bar_enabled() -> bool:
    global _tqdm_active
    return bool(_tqdm_active)


def enable_progress_bar():
    global _tqdm_active
    _tqdm_active = True


def disable_progress_bar():
    global _tqdm_active
    _tqdm_active = False
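# Illustrative usage sketch (assumes this module lives at `datasets.utils.logging`):
#
#   from datasets.utils.logging import get_logger, set_verbosity_info
#   set_verbosity_info()
#   get_logger("datasets.builder").info("verbosity now at INFO")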
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {"configuration_sew": ["SEW_PRETRAINED_CONFIG_ARCHIVE_MAP", "SEWConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_sew"] = [
'SEW_PRETRAINED_MODEL_ARCHIVE_LIST',
'SEWForCTC',
'SEWForSequenceClassification',
'SEWModel',
'SEWPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_sew import SEW_PRETRAINED_CONFIG_ARCHIVE_MAP, SEWConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_sew import (
SEW_PRETRAINED_MODEL_ARCHIVE_LIST,
SEWForCTC,
SEWForSequenceClassification,
SEWModel,
SEWPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 593
|
'''simple docstring'''
DOOMSDAY_LEAP = [4, 1, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
DOOMSDAY_NOT_LEAP = [3, 7, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
WEEK_DAY_NAMES = {
    0: 'Sunday',
    1: 'Monday',
    2: 'Tuesday',
    3: 'Wednesday',
    4: 'Thursday',
    5: 'Friday',
    6: 'Saturday',
}
def get_week_day(year: int, month: int, day: int) -> str:
    '''Returns the week-day name for a given date, using the Doomsday algorithm.

    >>> get_week_day(2020, 10, 24)
    'Saturday'
    >>> get_week_day(2017, 10, 24)
    'Tuesday'
    '''
    assert len(str(year)) > 2, "year should be in YYYY format"
    assert 1 <= month <= 12, "month should be between 1 to 12"
    assert 1 <= day <= 31, "day should be between 1 to 31"
    # Doomsday algorithm:
    century = year // 100
    century_anchor = (5 * (century % 4) + 2) % 7
    centurian = year % 100
    centurian_m = centurian % 12
    dooms_day = (
        (centurian // 12) + centurian_m + (centurian_m // 4) + century_anchor
    ) % 7
    day_anchor = (
        DOOMSDAY_NOT_LEAP[month - 1]
        if (year % 4 != 0) or (centurian == 0 and (year % 400) != 0)
        else DOOMSDAY_LEAP[month - 1]
    )
    week_day = (dooms_day + day - day_anchor) % 7
    return WEEK_DAY_NAMES[week_day]
if __name__ == "__main__":
import doctest
doctest.testmod()
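# A few extra spot checks for get_week_day (dates verified against a calendar);
# the algorithm is O(1) per query, so looping over dates is cheap:
#
#     for y, m, d in [(2000, 1, 1), (1999, 12, 31)]:
#         print(get_week_day(y, m, d))  # Saturday, Friday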
| 593
| 1
|
import argparse
import os
import re
import numpy as np
import PIL
import torch
from timm import create_model
from torch.optim.lr_scheduler import OneCycleLR
from torch.utils.data import DataLoader, Dataset
from torchvision.transforms import Compose, RandomResizedCrop, Resize, ToTensor
from accelerate import Accelerator
def extract_label(fname: str) -> str:
    """Extract the class label from a file name shaped like `label_123.jpg`."""
    stem = fname.split(os.path.sep)[-1]
    return re.search(r"^(.*)_\d+\.jpg$", stem).groups()[0]
class PetsDataset(Dataset):
    def __init__(self, file_names, image_transform=None, label_to_id=None):
        self.file_names = file_names
        self.image_transform = image_transform
        self.label_to_id = label_to_id
    def __len__(self):
        return len(self.file_names)
    def __getitem__(self, idx):
        fname = self.file_names[idx]
        raw_image = PIL.Image.open(fname)
        image = raw_image.convert("RGB")
        if self.image_transform is not None:
            image = self.image_transform(image)
        label = extract_label(fname)
        if self.label_to_id is not None:
            label = self.label_to_id[label]
        return {"image": image, "label": label}
def training_function(config, args):
    # Initialize accelerator
    if args.with_tracking:
        accelerator = Accelerator(
            cpu=args.cpu, mixed_precision=args.mixed_precision, log_with="all", project_dir=args.project_dir
        )
    else:
        accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    image_size = config["image_size"]
    if not isinstance(image_size, (list, tuple)):
        image_size = (image_size, image_size)
    # Parse out whether we are saving every epoch or after a certain number of batches
    if hasattr(args.checkpointing_steps, "isdigit"):
        if args.checkpointing_steps == "epoch":
            checkpointing_steps = args.checkpointing_steps
        elif args.checkpointing_steps.isdigit():
            checkpointing_steps = int(args.checkpointing_steps)
        else:
            raise ValueError(
                f"Argument `checkpointing_steps` must be either a number or `epoch`. `{args.checkpointing_steps}` passed."
            )
    else:
        checkpointing_steps = None
    # We need to initialize the trackers we use, and also store our configuration
    if args.with_tracking:
        run = os.path.split(__file__)[-1].split(".")[0]
        accelerator.init_trackers(run, config)
    # Grab all the image filenames
    file_names = [os.path.join(args.data_dir, fname) for fname in os.listdir(args.data_dir) if fname.endswith(".jpg")]
    # Build the label correspondences
    all_labels = [extract_label(fname) for fname in file_names]
    id_to_label = list(set(all_labels))
    id_to_label.sort()
    label_to_id = {lbl: i for i, lbl in enumerate(id_to_label)}
    # Set the seed before splitting the data.
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    # Split our filenames between train and validation
    random_perm = np.random.permutation(len(file_names))
    cut = int(0.8 * len(file_names))
    train_split = random_perm[:cut]
    eval_split = random_perm[cut:]
    # For training we use a simple RandomResizedCrop
    train_tfm = Compose([RandomResizedCrop(image_size, scale=(0.5, 1.0)), ToTensor()])
    train_dataset = PetsDataset(
        [file_names[i] for i in train_split], image_transform=train_tfm, label_to_id=label_to_id
    )
    # For evaluation, we use a deterministic Resize
    eval_tfm = Compose([Resize(image_size), ToTensor()])
    eval_dataset = PetsDataset([file_names[i] for i in eval_split], image_transform=eval_tfm, label_to_id=label_to_id)
    # Instantiate dataloaders.
    train_dataloader = DataLoader(train_dataset, shuffle=True, batch_size=batch_size, num_workers=4)
    eval_dataloader = DataLoader(eval_dataset, shuffle=False, batch_size=batch_size, num_workers=4)
    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = create_model("resnet50d", pretrained=True, num_classes=len(label_to_id))
    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)
    # Freezing the base model
    for param in model.parameters():
        param.requires_grad = False
    for param in model.get_classifier().parameters():
        param.requires_grad = True
    # We normalize the batches of images to be a bit faster.
    mean = torch.tensor(model.default_cfg["mean"])[None, :, None, None].to(accelerator.device)
    std = torch.tensor(model.default_cfg["std"])[None, :, None, None].to(accelerator.device)
    # Instantiate optimizer
    optimizer = torch.optim.Adam(params=model.parameters(), lr=lr / 25)
    # Instantiate learning rate scheduler
    lr_scheduler = OneCycleLR(optimizer=optimizer, max_lr=lr, epochs=num_epochs, steps_per_epoch=len(train_dataloader))
    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )
    # We need to keep track of how many total steps we have iterated over
    overall_step = 0
    # We also need to keep track of the starting epoch so files are named properly
    starting_epoch = 0
    # Potentially load in the weights and states from a previous save
    if args.resume_from_checkpoint:
        if args.resume_from_checkpoint is not None or args.resume_from_checkpoint != "":
            accelerator.print(f"Resumed from checkpoint: {args.resume_from_checkpoint}")
            accelerator.load_state(args.resume_from_checkpoint)
            path = os.path.basename(args.resume_from_checkpoint)
        else:
            # Get the most recent checkpoint
            dirs = [f.name for f in os.scandir(os.getcwd()) if f.is_dir()]
            dirs.sort(key=os.path.getctime)
            path = dirs[-1]  # Sorts folders by date modified, most recent checkpoint is the last
        # Extract `epoch_{i}` or `step_{i}`
        training_difference = os.path.splitext(path)[0]
        if "epoch" in training_difference:
            starting_epoch = int(training_difference.replace("epoch_", "")) + 1
            resume_step = None
        else:
            resume_step = int(training_difference.replace("step_", ""))
            starting_epoch = resume_step // len(train_dataloader)
            resume_step -= starting_epoch * len(train_dataloader)
    # Now we train the model
    for epoch in range(starting_epoch, num_epochs):
        model.train()
        if args.with_tracking:
            total_loss = 0
        if args.resume_from_checkpoint and epoch == starting_epoch and resume_step is not None:
            # We need to skip steps until we reach the resumed step
            active_dataloader = accelerator.skip_first_batches(train_dataloader, resume_step)
            overall_step += resume_step
        else:
            # After the first iteration though, we need to go back to the original dataloader
            active_dataloader = train_dataloader
        for batch in active_dataloader:
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch = {k: v.to(accelerator.device) for k, v in batch.items()}
            inputs = (batch["image"] - mean) / std
            outputs = model(inputs)
            loss = torch.nn.functional.cross_entropy(outputs, batch["label"])
            # We keep track of the loss at each epoch
            if args.with_tracking:
                total_loss += loss.detach().float()
            accelerator.backward(loss)
            optimizer.step()
            lr_scheduler.step()
            optimizer.zero_grad()
            overall_step += 1
            if isinstance(checkpointing_steps, int):
                output_dir = f"step_{overall_step}"
                if overall_step % checkpointing_steps == 0:
                    if args.output_dir is not None:
                        output_dir = os.path.join(args.output_dir, output_dir)
                    accelerator.save_state(output_dir)
        model.eval()
        accurate = 0
        num_elems = 0
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch = {k: v.to(accelerator.device) for k, v in batch.items()}
            inputs = (batch["image"] - mean) / std
            with torch.no_grad():
                outputs = model(inputs)
            predictions = outputs.argmax(dim=-1)
            predictions, references = accelerator.gather_for_metrics((predictions, batch["label"]))
            accurate_preds = predictions == references
            num_elems += accurate_preds.shape[0]
            accurate += accurate_preds.long().sum()
        eval_metric = accurate.item() / num_elems
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}: {100 * eval_metric:.2f}")
        if args.with_tracking:
            accelerator.log(
                {
                    "accuracy": 100 * eval_metric,
                    "train_loss": total_loss.item() / len(train_dataloader),
                    "epoch": epoch,
                },
                step=overall_step,
            )
        if checkpointing_steps == "epoch":
            output_dir = f"epoch_{epoch}"
            if args.output_dir is not None:
                output_dir = os.path.join(args.output_dir, output_dir)
            accelerator.save_state(output_dir)
    if args.with_tracking:
        accelerator.end_training()
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument("--data_dir", required=True, help="The data folder on disk.")
    parser.add_argument("--fp16", action="store_true", help="If passed, will use FP16 training.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    parser.add_argument(
        "--checkpointing_steps",
        type=str,
        default=None,
        help="Whether the various states should be saved at the end of every n steps, or 'epoch' for each epoch.",
    )
    parser.add_argument(
        "--output_dir",
        type=str,
        default=".",
        help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory.",
    )
    parser.add_argument(
        "--resume_from_checkpoint",
        type=str,
        default=None,
        help="If the training should continue from a checkpoint folder.",
    )
    parser.add_argument(
        "--with_tracking",
        action="store_true",
        help="Whether to load in all available experiment trackers from the environment and use them for logging.",
    )
    parser.add_argument(
        "--project_dir",
        type=str,
        default="logs",
        help="Location on where to store experiment tracking logs and relevant project information",
    )
    args = parser.parse_args()
    config = {"lr": 3e-2, "num_epochs": 3, "seed": 42, "batch_size": 64, "image_size": 224}
    training_function(config, args)
if __name__ == "__main__":
main()
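# A hedged invocation sketch (the script name `cv_example.py` is an assumption;
# the data folder must contain `label_123.jpg`-style files for `extract_label`):
#
#     accelerate launch cv_example.py --data_dir ./images \
#         --checkpointing_steps epoch --output_dir ./checkpoints --with_tracking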
| 202
|
import sys
N = (
'73167176531330624919225119674426574742355349194934'
'96983520312774506326239578318016984801869478851843'
'85861560789112949495459501737958331952853208805511'
'12540698747158523863050715693290963295227443043557'
'66896648950445244523161731856403098711121722383113'
'62229893423380308135336276614282806444486645238749'
'30358907296290491560440772390713810515859307960866'
'70172427121883998797908792274921901699720888093776'
'65727333001053367881220235421809751254540594752243'
'52584907711670556013604839586446706324415722155397'
'53697817977846174064955149290862569321978468622482'
'83972241375657056057490261407972968652414535100474'
'82166370484403199890008895243450658541227588666881'
'16427171479924442928230863465674813919123162824586'
'17866458359124566529476545682848912883142607690042'
'24219022671055626321111109370544217506941658960408'
'07198403850962455444362981230987879927244284909188'
'84580156166097919133875499200524063689912560717606'
'05886116467109405077541002256983155200055935729725'
'71636269561882670428252483600823257530420752963450'
)
def solution(n: str = N) -> int:
    """Find the thirteen adjacent digits in the 1000-digit number `n` that
    have the greatest product, and return that product."""
    largest_product = -sys.maxsize - 1
    for i in range(len(n) - 12):
        product = 1
        for j in range(13):
            product *= int(n[i + j])
        if product > largest_product:
            largest_product = product
    return largest_product
if __name__ == "__main__":
print(f"{solution() = }")
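# Sanity check: for this 1000-digit input and a 13-digit window, the published
# Project Euler #8 answer is 23514624000; treat this note as a cross-check,
# not as part of the algorithm.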
| 291
| 0
|
import importlib
import json
import os
from collections import OrderedDict
from typing import Dict, Optional, Union
# Build the list of all image processors
from ...configuration_utils import PretrainedConfig
from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code
from ...image_processing_utils import ImageProcessingMixin
from ...utils import CONFIG_NAME, IMAGE_PROCESSOR_NAME, get_file_from_repo, logging
from .auto_factory import _LazyAutoMapping
from .configuration_auto import (
CONFIG_MAPPING_NAMES,
AutoConfig,
model_type_to_module_name,
replace_list_option_in_docstrings,
)
logger = logging.get_logger(__name__)
IMAGE_PROCESSOR_MAPPING_NAMES = OrderedDict(
[
("align", "EfficientNetImageProcessor"),
("beit", "BeitImageProcessor"),
("bit", "BitImageProcessor"),
("blip", "BlipImageProcessor"),
("blip-2", "BlipImageProcessor"),
("bridgetower", "BridgeTowerImageProcessor"),
("chinese_clip", "ChineseCLIPImageProcessor"),
("clip", "CLIPImageProcessor"),
("clipseg", "ViTImageProcessor"),
("conditional_detr", "ConditionalDetrImageProcessor"),
("convnext", "ConvNextImageProcessor"),
("convnextv2", "ConvNextImageProcessor"),
("cvt", "ConvNextImageProcessor"),
("data2vec-vision", "BeitImageProcessor"),
("deformable_detr", "DeformableDetrImageProcessor"),
("deit", "DeiTImageProcessor"),
("deta", "DetaImageProcessor"),
("detr", "DetrImageProcessor"),
("dinat", "ViTImageProcessor"),
("donut-swin", "DonutImageProcessor"),
("dpt", "DPTImageProcessor"),
("efficientformer", "EfficientFormerImageProcessor"),
("efficientnet", "EfficientNetImageProcessor"),
("flava", "FlavaImageProcessor"),
("focalnet", "BitImageProcessor"),
("git", "CLIPImageProcessor"),
("glpn", "GLPNImageProcessor"),
("groupvit", "CLIPImageProcessor"),
("imagegpt", "ImageGPTImageProcessor"),
("instructblip", "BlipImageProcessor"),
("layoutlmv2", "LayoutLMv2ImageProcessor"),
("layoutlmv3", "LayoutLMv3ImageProcessor"),
("levit", "LevitImageProcessor"),
("mask2former", "Mask2FormerImageProcessor"),
("maskformer", "MaskFormerImageProcessor"),
("mgp-str", "ViTImageProcessor"),
("mobilenet_v1", "MobileNetV1ImageProcessor"),
("mobilenet_v2", "MobileNetV2ImageProcessor"),
("mobilevit", "MobileViTImageProcessor"),
("mobilevitv2", "MobileViTImageProcessor"),
("nat", "ViTImageProcessor"),
("oneformer", "OneFormerImageProcessor"),
("owlvit", "OwlViTImageProcessor"),
("perceiver", "PerceiverImageProcessor"),
("pix2struct", "Pix2StructImageProcessor"),
("poolformer", "PoolFormerImageProcessor"),
("regnet", "ConvNextImageProcessor"),
("resnet", "ConvNextImageProcessor"),
("sam", "SamImageProcessor"),
("segformer", "SegformerImageProcessor"),
("swiftformer", "ViTImageProcessor"),
("swin", "ViTImageProcessor"),
("swin2sr", "Swin2SRImageProcessor"),
("swinv2", "ViTImageProcessor"),
("table-transformer", "DetrImageProcessor"),
("timesformer", "VideoMAEImageProcessor"),
("tvlt", "TvltImageProcessor"),
("upernet", "SegformerImageProcessor"),
("van", "ConvNextImageProcessor"),
("videomae", "VideoMAEImageProcessor"),
("vilt", "ViltImageProcessor"),
("vit", "ViTImageProcessor"),
("vit_hybrid", "ViTHybridImageProcessor"),
("vit_mae", "ViTImageProcessor"),
("vit_msn", "ViTImageProcessor"),
("xclip", "CLIPImageProcessor"),
("yolos", "YolosImageProcessor"),
]
)
IMAGE_PROCESSOR_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, IMAGE_PROCESSOR_MAPPING_NAMES)
def image_processor_class_from_name(class_name):
    for module_name, extractors in IMAGE_PROCESSOR_MAPPING_NAMES.items():
        if class_name in extractors:
            module_name = model_type_to_module_name(module_name)
            module = importlib.import_module(f".{module_name}", "transformers.models")
            try:
                return getattr(module, class_name)
            except AttributeError:
                continue
    for _, extractor in IMAGE_PROCESSOR_MAPPING._extra_content.items():
        if getattr(extractor, "__name__", None) == class_name:
            return extractor
    # We did not find the class, but maybe it's because a dep is missing. In that case, the class will be in the main
    # init and we return the proper dummy to get an appropriate error message.
    main_module = importlib.import_module("transformers")
    if hasattr(main_module, class_name):
        return getattr(main_module, class_name)
    return None
def get_image_processor_config(pretrained_model_name_or_path, cache_dir=None, force_download=False, resume_download=False, proxies=None, use_auth_token=None, revision=None, local_files_only=False, **kwargs):
    resolved_config_file = get_file_from_repo(
        pretrained_model_name_or_path, IMAGE_PROCESSOR_NAME, cache_dir=cache_dir, force_download=force_download, resume_download=resume_download, proxies=proxies, use_auth_token=use_auth_token, revision=revision, local_files_only=local_files_only)
    if resolved_config_file is None:
        logger.info(
            "Could not locate the image processor configuration file, will try to use the model config instead.")
        return {}
    with open(resolved_config_file, encoding="utf-8") as reader:
        return json.load(reader)
class AutoImageProcessor:
    """Factory class; instantiate one of the image processor classes of the library via `from_pretrained`."""
    def __init__(self):
        raise EnvironmentError(
            "AutoImageProcessor is designed to be instantiated "
            "using the `AutoImageProcessor.from_pretrained(pretrained_model_name_or_path)` method.")
    @classmethod
    @replace_list_option_in_docstrings(IMAGE_PROCESSOR_MAPPING_NAMES)
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        config = kwargs.pop("config", None)
        trust_remote_code = kwargs.pop("trust_remote_code", None)
        kwargs["_from_auto"] = True
        config_dict, _ = ImageProcessingMixin.get_image_processor_dict(pretrained_model_name_or_path, **kwargs)
        image_processor_class = config_dict.get("image_processor_type", None)
        image_processor_auto_map = None
        if "AutoImageProcessor" in config_dict.get("auto_map", {}):
            image_processor_auto_map = config_dict["auto_map"]["AutoImageProcessor"]
        # If we still don't have the image processor class, check if we're loading from a previous feature extractor config
        # and if so, infer the image processor class from there.
        if image_processor_class is None and image_processor_auto_map is None:
            feature_extractor_class = config_dict.pop("feature_extractor_type", None)
            if feature_extractor_class is not None:
                logger.warning(
                    "Could not find image processor class in the image processor config or the model config. Loading"
                    " based on pattern matching with the model's feature extractor configuration.")
                image_processor_class = feature_extractor_class.replace("FeatureExtractor", "ImageProcessor")
            if "AutoFeatureExtractor" in config_dict.get("auto_map", {}):
                feature_extractor_auto_map = config_dict["auto_map"]["AutoFeatureExtractor"]
                image_processor_auto_map = feature_extractor_auto_map.replace("FeatureExtractor", "ImageProcessor")
                logger.warning(
                    "Could not find image processor auto map in the image processor config or the model config."
                    " Loading based on pattern matching with the model's feature extractor configuration.")
        # If we don't find the image processor class in the image processor config, let's try the model config.
        if image_processor_class is None and image_processor_auto_map is None:
            if not isinstance(config, PretrainedConfig):
                config = AutoConfig.from_pretrained(pretrained_model_name_or_path, **kwargs)
            # It could be in `config.image_processor_type`
            image_processor_class = getattr(config, "image_processor_type", None)
            if hasattr(config, "auto_map") and "AutoImageProcessor" in config.auto_map:
                image_processor_auto_map = config.auto_map["AutoImageProcessor"]
        if image_processor_class is not None:
            image_processor_class = image_processor_class_from_name(image_processor_class)
        has_remote_code = image_processor_auto_map is not None
        has_local_code = image_processor_class is not None or type(config) in IMAGE_PROCESSOR_MAPPING
        trust_remote_code = resolve_trust_remote_code(
            trust_remote_code, pretrained_model_name_or_path, has_local_code, has_remote_code)
        if has_remote_code and trust_remote_code:
            image_processor_class = get_class_from_dynamic_module(
                image_processor_auto_map, pretrained_model_name_or_path, **kwargs)
            _ = kwargs.pop("code_revision", None)
            if os.path.isdir(pretrained_model_name_or_path):
                image_processor_class.register_for_auto_class()
            return image_processor_class.from_dict(config_dict, **kwargs)
        elif image_processor_class is not None:
            return image_processor_class.from_dict(config_dict, **kwargs)
        # Last try: we use the IMAGE_PROCESSOR_MAPPING.
        elif type(config) in IMAGE_PROCESSOR_MAPPING:
            image_processor_class = IMAGE_PROCESSOR_MAPPING[type(config)]
            return image_processor_class.from_dict(config_dict, **kwargs)
        raise ValueError(
            f"Unrecognized image processor in {pretrained_model_name_or_path}. Should have a "
            f"`image_processor_type` key in its {IMAGE_PROCESSOR_NAME} of {CONFIG_NAME}, or one of the following "
            f"`model_type` keys in its {CONFIG_NAME}: {', '.join(c for c in IMAGE_PROCESSOR_MAPPING_NAMES.keys())}")
    @staticmethod
    def register(config_class, image_processor_class):
        IMAGE_PROCESSOR_MAPPING.register(config_class, image_processor_class)
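# A minimal usage sketch: resolve the matching image processor for a checkpoint
# by name ("google/vit-base-patch16-224" is a real Hub checkpoint, used purely
# as an illustration):
#
#     image_processor = AutoImageProcessor.from_pretrained("google/vit-base-patch16-224")
#     inputs = image_processor(images=pil_image, return_tensors="pt")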
| 597
|
import argparse
import re
from flax.traverse_util import flatten_dict, unflatten_dict
from t5x import checkpoints
from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration
from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model
from transformers.utils import logging
logging.set_verbosity_info()
# should not include what is already done by the `from_pt` argument
MOE_LAYER_NAME_MAPPING = {
"/attention/": "/0/SelfAttention/",
"/self_attention/": "/0/SelfAttention/",
"/encoder_decoder_attention/": "/1/EncDecAttention/",
"value": "v",
"query": "q",
"key": "k",
"out": "o",
"pre_self_attention_layer_norm": "0/layer_norm",
"pre_cross_attention_layer_norm": "1/layer_norm",
"pre_attention_layer_norm": "0/layer_norm", # previously 1, but seems wrong
"token_embedder": "shared",
"encoder_norm": "final_layer_norm",
"decoder_norm": "final_layer_norm",
"relpos_bias/rel_embedding": "block/0/layer/0/SelfAttention/relative_attention_bias/weight",
"router/router_weights/w/": "router/classifier/",
"roer/roer_weights/w/": "router/classifier/",
"logits_dense": "lm_head",
}
def rename_keys(s_dict):
    # 1. in HF T5, we have block.{x}.layer.{y}. which corresponds to layer.{x} in
    # the original model
    keys = list(s_dict.keys())
    for key in keys:
        layer_to_block_of_layer = r".*/layers_(\d+)"
        new_key = key
        if re.match(layer_to_block_of_layer, key):
            new_key = re.sub(r"layers_(\d+)", r"block/\1/layer", new_key)
        layer_to_block_of_layer = r"(encoder|decoder)\/"
        if re.match(layer_to_block_of_layer, key):
            groups = re.match(layer_to_block_of_layer, new_key).groups()
            if groups[0] == "encoder":
                new_key = re.sub(r"/mlp/", r"/1/mlp/", new_key)
                new_key = re.sub(r"/pre_mlp_layer_norm/", r"/1/layer_norm/", new_key)
            elif groups[0] == "decoder":
                new_key = re.sub(r"/mlp/", r"/2/mlp/", new_key)
                new_key = re.sub(r"/pre_mlp_layer_norm/", r"/2/layer_norm/", new_key)
        # 2. Convert other classic mappings
        for old_key, temp_key in MOE_LAYER_NAME_MAPPING.items():
            if old_key in new_key:
                new_key = new_key.replace(old_key, temp_key)
        print(f"{key} -> {new_key}")
        s_dict[new_key] = s_dict.pop(key)
    if "encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
        s_dict["encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"] = s_dict[
            "encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"
        ].T
    if "decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
        s_dict["decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"] = s_dict[
            "decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"
        ].T
    # 3. Take extra care of the EXPERTS layer
    for key in list(s_dict.keys()):
        if "expert" in key:
            num_experts = s_dict[key].shape[0]
            expert_weights = s_dict[key]
            for idx in range(num_experts):
                s_dict[key.replace("expert/", f"experts/expert_{idx}/")] = expert_weights[idx]
                print(f"{key} -> {key.replace('expert/', f'experts/expert_{idx}/')}")
            s_dict.pop(key)
    return s_dict
GIN_TO_CONFIG_MAPPING = {
"NUM_ENCODER_LAYERS": "num_layers",
"NUM_DECODER_LAYERS": "num_decoder_layers",
"NUM_HEADS": "num_heads",
"HEAD_DIM": "d_kv",
"EMBED_DIM": "d_model",
"MLP_DIM": "d_ff",
"NUM_SELECTED_EXPERTS": "num_selected_experts",
"NUM_ENCODER_SPARSE_LAYERS": "num_sparse_encoder_layers",
"NUM_DECODER_SPARSE_LAYERS": "num_sparse_decoder_layers",
"dense.MlpBlock.activations": "feed_forward_proj",
}
def convert_gin_to_config(gin_file, num_experts):
    # Convert a google style config to the hugging face format
    import regex as re

    with open(gin_file, "r") as f:
        raw_gin = f.read()
    regex_match = re.findall(r"(.*) = ([0-9.]*)", raw_gin)
    args = {}
    for param, value in regex_match:
        if param in GIN_TO_CONFIG_MAPPING and value != "":
            args[GIN_TO_CONFIG_MAPPING[param]] = float(value) if "." in value else int(value)
    activation = re.findall(r"(.*activations) = \(\'(.*)\',\)", raw_gin)[0]
    args[GIN_TO_CONFIG_MAPPING[activation[0]]] = str(activation[1])
    args["num_experts"] = num_experts
    config = SwitchTransformersConfig(**args)
    return config
def convert_flax_checkpoint_to_pytorch(flax_checkpoint_path, config_file, gin_file=None, pytorch_dump_path="./", num_experts=8):
    # Initialise PyTorch model
    print(f"Loading flax weights from : {flax_checkpoint_path}")
    flax_params = checkpoints.load_t5x_checkpoint(flax_checkpoint_path)
    if gin_file is not None:
        config = convert_gin_to_config(gin_file, num_experts)
    else:
        config = SwitchTransformersConfig.from_pretrained(config_file)
    pt_model = SwitchTransformersForConditionalGeneration(config)
    flax_params = flax_params["target"]
    flax_params = flatten_dict(flax_params, sep="/")
    flax_params = rename_keys(flax_params)
    flax_params = unflatten_dict(flax_params, sep="/")
    # Load the flax params in the PT model
    load_flax_weights_in_pytorch_model(pt_model, flax_params)
    print(f"Save PyTorch model to {pytorch_dump_path}")
    pt_model.save_pretrained(pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--switch_t5x_checkpoint_path",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained SwitchTransformers model. \nThis specifies the"
" model architecture. If not provided, a `gin_file` has to be provided."
),
)
parser.add_argument(
"--gin_file",
default=None,
type=str,
required=False,
help="Path to the gin config file. If not provided, a `config_file` has to be passed ",
)
parser.add_argument(
"--config_name", default=None, type=str, required=False, help="Config name of SwitchTransformers model."
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output pytorch model."
)
parser.add_argument("--num_experts", default=8, type=int, required=False, help="Number of experts")
    args = parser.parse_args()
    convert_flax_checkpoint_to_pytorch(
        args.switch_t5x_checkpoint_path,
        args.config_name,
        args.gin_file,
        args.pytorch_dump_folder_path,
        args.num_experts,
    )
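# A hedged invocation sketch (all paths are placeholders, and the script
# filename is an assumption based on transformers' conversion-script naming):
#
#     python convert_switch_transformers_original_flax_checkpoint_to_pytorch.py \
#         --switch_t5x_checkpoint_path /path/to/t5x/checkpoint \
#         --gin_file /path/to/config.gin \
#         --pytorch_dump_folder_path ./switch-base-8-converted \
#         --num_experts 8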
| 597
| 1
|
'''simple docstring'''
ENERGY_CONVERSION: dict[str, float] = {
    "joule": 1.0,
    "kilojoule": 1_000,
    "megajoule": 1_000_000,
    "gigajoule": 1_000_000_000,
    "wattsecond": 1.0,
    "watthour": 3_600,
    "kilowatthour": 3_600_000,
    "newtonmeter": 1.0,
    "calorie_nutr": 4_186.8,
    "kilocalorie_nutr": 4_186_800.00,
    "electronvolt": 1.602_176_634e-19,
    "britishthermalunit_it": 1_055.055_85,
    "footpound": 1.355_818,
}
def energy_conversion(from_type: str, to_type: str, value: float) -> float:
    """Convert `value` between the energy units listed in ENERGY_CONVERSION.

    >>> energy_conversion("joule", "kilojoule", 1000)
    1.0
    """
    if to_type not in ENERGY_CONVERSION or from_type not in ENERGY_CONVERSION:
        msg = (
            f"Incorrect 'from_type' or 'to_type' value: {from_type!r}, {to_type!r}\n"
            f"Valid values are: {', '.join(ENERGY_CONVERSION)}"
        )
        raise ValueError(msg)
    return value * ENERGY_CONVERSION[from_type] / ENERGY_CONVERSION[to_type]
if __name__ == "__main__":
import doctest
doctest.testmod()
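# Two more conversions that follow directly from the table above:
#
#     energy_conversion("kilowatthour", "joule", 1.0)                 # -> 3600000.0
#     energy_conversion("calorie_nutr", "kilocalorie_nutr", 1000.0)   # -> 1.0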
| 72
|
import unittest
from parameterized import parameterized
from transformers import LlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaTokenizer
class LlamaModelTester:
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=False, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self):
        return LlamaConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range)
    def create_and_check_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = LlamaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_model_as_decoder(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask):
        config.add_cross_attention = True
        model = LlamaModel(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask)
        result = model(input_ids, attention_mask=input_mask, encoder_hidden_states=encoder_hidden_states)
        result = model(input_ids, attention_mask=input_mask)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_for_causal_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask):
        model = LlamaForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_decoder_model_past_large_inputs(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask):
        config.is_decoder = True
        config.add_cross_attention = True
        model = LlamaForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        # first forward pass
        outputs = model(input_ids, attention_mask=input_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, use_cache=True)
        past_key_values = outputs.past_key_values
        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_mask = ids_tensor((self.batch_size, 3), vocab_size=2)
        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([input_mask, next_mask], dim=-1)
        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, output_hidden_states=True)["hidden_states"][0]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, past_key_values=past_key_values, output_hidden_states=True)["hidden_states"][0]
        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()
        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])
        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class LlamaModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (LlamaModel, LlamaForCausalLM, LlamaForSequenceClassification) if is_torch_available() else ()
    all_generative_model_classes = (LlamaForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": LlamaModel,
            "text-classification": LlamaForSequenceClassification,
            "text-generation": LlamaForCausalLM,
            "zero-shot": LlamaForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_headmasking = False
    test_pruning = False
    def setUp(self):
        self.model_tester = LlamaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LlamaConfig, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)
    def test_llama_sequence_classification_model(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = LlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))
    def test_llama_sequence_classification_model_for_single_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = "single_label_classification"
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = LlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))
    def test_llama_sequence_classification_model_for_multi_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = "multi_label_classification"
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor(
            [self.model_tester.batch_size, config.num_labels], self.model_tester.type_sequence_label_size
        ).to(torch.float)
        model = LlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))
    @unittest.skip("LLaMA buffers include complex numbers, which breaks this test")
    def test_save_load_fast_init_from_base(self):
        pass
    @parameterized.expand([("linear",), ("dynamic",)])
    def test_model_rope_scaling(self, scaling_type):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        short_input = ids_tensor([1, 10], config.vocab_size)
        long_input = ids_tensor([1, int(config.max_position_embeddings * 1.5)], config.vocab_size)
        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        original_model = LlamaModel(config)
        original_model.to(torch_device)
        original_model.eval()
        original_short_output = original_model(short_input).last_hidden_state
        original_long_output = original_model(long_input).last_hidden_state
        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        config.rope_scaling = {"type": scaling_type, "factor": 10.0}
        scaled_model = LlamaModel(config)
        scaled_model.to(torch_device)
        scaled_model.eval()
        scaled_short_output = scaled_model(short_input).last_hidden_state
        scaled_long_output = scaled_model(long_input).last_hidden_state
        # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
        # maximum sequence length, so the outputs for the short input should match.
        if scaling_type == "dynamic":
            self.assertTrue(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))
        else:
            self.assertFalse(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))
        # The output should be different for long inputs
        self.assertFalse(torch.allclose(original_long_output, scaled_long_output, atol=1e-5))
@require_torch
class LlamaIntegrationTest(unittest.TestCase):
    @unittest.skip("Logits are not exactly the same, once we fix the instabilities somehow, will update!")
    @slow
    def test_model_7b_logits(self):
        input_ids = [1, 306, 4658, 278, 6593, 310, 2834, 338]
        model = LlamaForCausalLM.from_pretrained("meta-llama/Llama-2-7b-hf", device_map="auto")
        out = model(torch.tensor([input_ids]))
        # Expected mean on dim = -1
        EXPECTED_MEAN = torch.tensor([[-6.6550, -4.1227, -4.9859, -3.2406, 0.8262, -3.0033, 1.2964, -3.3699]])
        torch.testing.assert_close(out.mean(-1), EXPECTED_MEAN, atol=1e-2, rtol=1e-2)
        # slicing logits[0, 0, 0:30]
        # fmt: off
        EXPECTED_SLICE = torch.tensor([-12.8281, -7.4453, -0.4639, -8.0625, -7.2500, -8.0000, -6.4883, -7.7695, -7.8438, -7.0312, -6.2188, -7.1328, -1.8496, 1.9961, -8.6250, -6.7227, -12.8281, -6.9492, -7.0742, -7.7852, -7.5820, -7.9062, -6.9375, -7.9805, -8.3438, -8.1562, -8.0469, -7.6250, -7.7422, -7.3398,])
        # fmt: on
        torch.testing.assert_close(out[0, 0, :30], EXPECTED_SLICE, atol=1e-5, rtol=1e-5)
    @unittest.skip("Logits are not exactly the same, once we fix the instabilities somehow, will update!")
    @slow
    def test_model_13b_logits(self):
        input_ids = [1, 306, 4658, 278, 6593, 310, 2834, 338]
        model = LlamaForCausalLM.from_pretrained("meta-llama/Llama-2-13b-hf", device_map="auto")
        out = model(torch.tensor(input_ids))
        # Expected mean on dim = -1
        EXPECTED_MEAN = torch.tensor([[-2.0622, -1.2794, -1.1638, -0.9788, -1.4603, -1.0238, -1.7893, -1.4411]])
        torch.testing.assert_close(out.mean(-1), EXPECTED_MEAN, atol=1e-2, rtol=1e-2)
        # slicing logits[0, 0, 0:30]
        # fmt: off
        EXPECTED_SLICE = torch.tensor([-8.1406, -8.0547, 2.7461, -1.2344, -0.1448, -1.8262, -1.0020, -1.8154, -1.6895, -1.8516, -2.3574, -0.9277, 3.7598, 6.5742, -1.2998, -0.1177, -8.1406, -2.9688, -2.9199, -3.1699, -3.5254, -2.3555, -2.7988, -3.4141, -2.8262, -4.5195, -3.3379, -3.3164, -2.7832, -3.0273])
        # fmt: on
        torch.testing.assert_close(out[0, 0, :30], EXPECTED_SLICE, atol=1e-5, rtol=1e-5)
    @unittest.skip("Logits are not exactly the same, once we fix the instabilities somehow, will update!")
    @slow
    def test_model_13bf_logits(self):
        input_ids = [1, 306, 4658, 278, 6593, 310, 2834, 338]
        model = LlamaForCausalLM.from_pretrained("meta-llama/Llama-2-13b-chat-hf", device_map="auto")
        out = model(torch.tensor(input_ids))
        # Expected mean on dim = -1
        EXPECTED_MEAN = torch.tensor([[-0.8562, -1.8520, -0.7551, -0.4162, -1.5161, -1.2038, -2.4823, -2.3254]])
        torch.testing.assert_close(out.mean(-1), EXPECTED_MEAN, atol=1e-2, rtol=1e-2)
        # slicing logits[0, 0, 0:30]
        # fmt: off
        EXPECTED_SLICE = torch.tensor([-2.2227, 4.8828, 0.9023, -0.4578, -0.7871, -0.1033, -0.6221, -0.5786, -0.7803, -1.0674, -1.2920, -0.1570, 0.8008, 2.0723, -0.9497, 0.2771, -2.2227, -0.7612, -1.4346, -1.2061, -1.6426, -0.3000, -0.7139, -1.1934, -1.8691, -1.6973, -1.5947, -1.2705, -0.3523, -0.5513])
        # fmt: on
        # Compare the logits slice here (the obfuscated source compared the mean,
        # which cannot broadcast against a 30-element vector).
        torch.testing.assert_close(out[0, 0, :30], EXPECTED_SLICE, atol=1e-2, rtol=1e-2)
    @unittest.skip(
        "Logits are not exactly the same, once we fix the instabilities somehow, will update! Also it is gonna be a `too_slow` test"
    )
    @slow
    def test_model_70b_logits(self):
        input_ids = [1, 306, 4658, 278, 6593, 310, 2834, 338]
        model = LlamaForCausalLM.from_pretrained("meta-llama/Llama-2-70b-hf", device_map="auto")
        out = model(torch.tensor(input_ids))
        EXPECTED_MEAN = torch.tensor(
            [[-4.2327, -3.3360, -4.6665, -4.7631, -1.8180, -3.4170, -1.4211, -3.1810]], dtype=torch.float32
        )
        torch.testing.assert_close(out.mean(-1), EXPECTED_MEAN, atol=1e-2, rtol=1e-2)
        # fmt: off
        EXPECTED_SLICE = torch.tensor([-9.4922, -3.9551, 1.7998, -5.6758, -5.1055, -5.8984, -4.8320, -6.8086, -6.5391, -5.6172, -5.5820, -5.5352, 1.7881, 3.6289, -6.5117, -3.4785, -9.5000, -6.0352, -6.8125, -6.0195, -6.6836, -5.4727, -6.2812, -6.0391, -7.3398, -7.4297, -7.4844, -6.5820, -5.8789, -5.5312])
        # fmt: on
        torch.testing.assert_close(out[0, 0, :30], EXPECTED_SLICE, atol=1e-5, rtol=1e-5)
    @unittest.skip("Model is currently gated")
    @slow
    def test_model_13b_greedy_generation(self):
        EXPECTED_TEXT_COMPLETION = 'Simply put, the theory of relativity states that 1) the laws of physics are the same everywhere in the universe and 2) the passage of time and the length of objects can vary depending on the observer\'s frame of reference.\n\nThe first part of the theory, that the laws of physics are the same everywhere, is known as the "princi'
        prompt = "Simply put, the theory of relativity states that "
        tokenizer = LlamaTokenizer.from_pretrained("meta-llama/Llama-2-13b-chat-hf")
        input_ids = tokenizer.encode(prompt, return_tensors="pt")
        model = LlamaForCausalLM.from_pretrained(
            "meta-llama/Llama-2-13b-chat-hf", device_map="sequential", use_safetensors=False
        )
        # greedy generation outputs
        generated_ids = model.generate(input_ids, max_new_tokens=64, top_p=None, temperature=1, do_sample=False)
        text = tokenizer.decode(generated_ids[0], skip_special_tokens=True)
        self.assertEqual(EXPECTED_TEXT_COMPLETION, text)
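# To run this test module (the path is an assumption; adjust to wherever it
# lives in your checkout). Slow integration tests only run with RUN_SLOW=1:
#
#     python -m pytest tests/models/llama/test_modeling_llama.py
#     RUN_SLOW=1 python -m pytest tests/models/llama/test_modeling_llama.py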
| 84
| 0
|
'''simple docstring'''
import os
import textwrap
import pyarrow as pa
import pytest
from datasets import ClassLabel, Features, Image
from datasets.packaged_modules.csv.csv import Csv
from ..utils import require_pil
@pytest.fixture
def csv_file(tmp_path):
    filename = tmp_path / "file.csv"
    data = textwrap.dedent(
        """\
        header1,header2
        1,2
        10,20
        """
    )
    with open(filename, "w") as f:
        f.write(data)
    return str(filename)
@pytest.fixture
def malformed_csv_file(tmp_path):
    filename = tmp_path / "malformed_file.csv"
    data = textwrap.dedent(
        """\
        header1,header2
        1,2
        10,20,
        """
    )
    with open(filename, "w") as f:
        f.write(data)
    return str(filename)
@pytest.fixture
def csv_file_with_image(tmp_path, image_file):
    filename = tmp_path / "csv_with_image.csv"
    data = textwrap.dedent(
        f"""\
        image
        {image_file}
        """
    )
    with open(filename, "w") as f:
        f.write(data)
    return str(filename)
@pytest.fixture
def csv_file_with_label(tmp_path):
    filename = tmp_path / "csv_with_label.csv"
    data = textwrap.dedent(
        """\
        label
        good
        bad
        good
        """
    )
    with open(filename, "w") as f:
        f.write(data)
    return str(filename)
@pytest.fixture
def csv_file_with_int_list(tmp_path):
    filename = tmp_path / "csv_with_int_list.csv"
    data = textwrap.dedent(
        """\
        int_list
        1 2 3
        4 5 6
        7 8 9
        """
    )
    with open(filename, "w") as f:
        f.write(data)
    return str(filename)
def test_csv_generate_tables_raises_error_with_malformed_csv(csv_file, malformed_csv_file, caplog):
    csv = Csv()
    generator = csv._generate_tables([[csv_file, malformed_csv_file]])
    with pytest.raises(ValueError, match="Error tokenizing data"):
        for _ in generator:
            pass
    assert any(
        record.levelname == "ERROR"
        and "Failed to read file" in record.message
        and os.path.basename(malformed_csv_file) in record.message
        for record in caplog.records
    )
@require_pil
def test_csv_cast_image(csv_file_with_image):
    with open(csv_file_with_image, encoding="utf-8") as f:
        image_file = f.read().splitlines()[1]
    csv = Csv(encoding="utf-8", features=Features({"image": Image()}))
    generator = csv._generate_tables([[csv_file_with_image]])
    pa_table = pa.concat_tables([table for _, table in generator])
    assert pa_table.schema.field("image").type == Image()()
    generated_content = pa_table.to_pydict()["image"]
    assert generated_content == [{"path": image_file, "bytes": None}]
def test_csv_cast_label(csv_file_with_label):
    with open(csv_file_with_label, encoding="utf-8") as f:
        labels = f.read().splitlines()[1:]
    csv = Csv(encoding="utf-8", features=Features({"label": ClassLabel(names=["good", "bad"])}))
    generator = csv._generate_tables([[csv_file_with_label]])
    pa_table = pa.concat_tables([table for _, table in generator])
    assert pa_table.schema.field("label").type == ClassLabel(names=["good", "bad"])()
    generated_content = pa_table.to_pydict()["label"]
    assert generated_content == [ClassLabel(names=["good", "bad"]).str2int(label) for label in labels]
def test_csv_convert_int_list(csv_file_with_int_list):
    csv = Csv(encoding="utf-8", sep=",", converters={"int_list": lambda x: [int(i) for i in x.split()]})
    generator = csv._generate_tables([[csv_file_with_int_list]])
    pa_table = pa.concat_tables([table for _, table in generator])
    assert pa.types.is_list(pa_table.schema.field("int_list").type)
    generated_content = pa_table.to_pydict()["int_list"]
    assert generated_content == [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
| 39
|
'''simple docstring'''
import requests
from bs4 import BeautifulSoup
def world_covid19_stats(url: str = "https://www.worldometers.info/coronavirus") -> dict:
    """Return a dict of worldwide COVID-19 statistics scraped from worldometers."""
    soup = BeautifulSoup(requests.get(url).text, "html.parser")
    keys = soup.findAll("h1")
    values = soup.findAll("div", {"class": "maincounter-number"})
    keys += soup.findAll("span", {"class": "panel-title"})
    values += soup.findAll("div", {"class": "number-table-main"})
    return {key.text.strip(): value.text.strip() for key, value in zip(keys, values)}
if __name__ == "__main__":
    print("\033[1m" + "COVID-19 Status of the World" + "\033[0m\n")
    for key, value in world_covid19_stats().items():
        print(f"{key}\n{value}\n")
| 39
| 1
|
from timeit import timeit
test_data = {
"MALAYALAM": True,
"String": False,
"rotor": True,
"level": True,
"A": True,
"BB": True,
"ABC": False,
"amanaplanacanalpanama": True, # "a man a plan a canal panama"
}
# Ensure our test data is valid
assert all((key == key[::-1]) is value for key, value in test_data.items())
def is_palindrome(s: str) -> bool:
    start_i = 0
    end_i = len(s) - 1
    while start_i < end_i:
        if s[start_i] == s[end_i]:
            start_i += 1
            end_i -= 1
        else:
            return False
    return True
def is_palindrome_traversal(s: str) -> bool:
    end = len(s) // 2
    n = len(s)
    # We need to traverse till half of the length of string
    # as we can get access of the i'th last element from
    # i'th index.
    # eg: [0,1,2,3,4,5] => 4th index can be accessed
    # with the help of 1st index (i==n-i-1)
    # where n is length of string
    return all(s[i] == s[n - i - 1] for i in range(end))
def is_palindrome_recursive(s: str) -> bool:
    if len(s) <= 2:
        return True
    if s[0] == s[len(s) - 1]:
        return is_palindrome_recursive(s[1:-1])
    else:
        return False
def is_palindrome_slice(s: str) -> bool:
    return s == s[::-1]
def benchmark_function(name: str) -> None:
    stmt = f"all({name}(key) is value for key, value in test_data.items())"
    setup = f"from __main__ import test_data, {name}"
    number = 500_000
    result = timeit(stmt=stmt, setup=setup, number=number)
    print(f"{name:<35} finished {number:,} runs in {result:.5f} seconds")
if __name__ == "__main__":
for key, value in test_data.items():
assert is_palindrome(key) is is_palindrome_recursive(key)
assert is_palindrome(key) is is_palindrome_slice(key)
print(f"""{key:21} {value}""")
print("a man a plan a canal panama")
# finished 500,000 runs in 0.46793 seconds
benchmark_function("is_palindrome_slice")
# finished 500,000 runs in 0.85234 seconds
benchmark_function("is_palindrome")
# finished 500,000 runs in 1.32028 seconds
benchmark_function("is_palindrome_recursive")
# finished 500,000 runs in 2.08679 seconds
benchmark_function("is_palindrome_traversal")
| 151
|
class Graph:
    def __init__(self):
        self.num_vertices = 0
        self.num_edges = 0
        self.adjacency = {}
    def add_vertex(self, vertex):
        """Add a vertex to the graph if it is not already present."""
        if vertex not in self.adjacency:
            self.adjacency[vertex] = {}
            self.num_vertices += 1
    def add_edge(self, head, tail, weight):
        """Add an undirected, weighted edge to the graph."""
        self.add_vertex(head)
        self.add_vertex(tail)
        if head == tail:
            return
        self.adjacency[head][tail] = weight
        self.adjacency[tail][head] = weight
    def distinct_weight(self):
        """Make all edge weights distinct (Boruvka's algorithm assumes this)."""
        edges = self.get_edges()
        for edge in edges:
            head, tail, weight = edge
            edges.remove((tail, head, weight))
        for i in range(len(edges)):
            edges[i] = list(edges[i])
        edges.sort(key=lambda e: e[2])
        for i in range(len(edges) - 1):
            if edges[i][2] >= edges[i + 1][2]:
                edges[i + 1][2] = edges[i][2] + 1
        for edge in edges:
            head, tail, weight = edge
            self.adjacency[head][tail] = weight
            self.adjacency[tail][head] = weight
    def __str__(self):
        string = ""
        for tail in self.adjacency:
            for head in self.adjacency[tail]:
                weight = self.adjacency[head][tail]
                string += f"{head} -> {tail} == {weight}\n"
        return string.rstrip("\n")
    def get_edges(self):
        output = []
        for tail in self.adjacency:
            for head in self.adjacency[tail]:
                output.append((tail, head, self.adjacency[head][tail]))
        return output
    def get_vertices(self):
        return self.adjacency.keys()
    @staticmethod
    def build(vertices=None, edges=None):
        g = Graph()
        if vertices is None:
            vertices = []
        if edges is None:
            edges = []
        for vertex in vertices:
            g.add_vertex(vertex)
        for edge in edges:
            g.add_edge(*edge)
        return g
class __a :
"""simple docstring"""
def __init__( self : str ) -> Optional[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ ={}
SCREAMING_SNAKE_CASE__ ={}
def __len__( self : int ) -> Optional[int]:
'''simple docstring'''
return len(self.parent )
def __A ( self : Any ,_UpperCamelCase : Any ) -> Optional[int]:
'''simple docstring'''
if item in self.parent:
return self.find(_UpperCamelCase )
SCREAMING_SNAKE_CASE__ =item
SCREAMING_SNAKE_CASE__ =0
return item
def __A ( self : Union[str, Any] ,_UpperCamelCase : Tuple ) -> List[Any]:
'''simple docstring'''
if item not in self.parent:
return self.make_set(_UpperCamelCase )
if item != self.parent[item]:
SCREAMING_SNAKE_CASE__ =self.find(self.parent[item] )
return self.parent[item]
def __A ( self : Any ,_UpperCamelCase : Any ,_UpperCamelCase : Union[str, Any] ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ =self.find(_UpperCamelCase )
SCREAMING_SNAKE_CASE__ =self.find(_UpperCamelCase )
if roota == roota:
return roota
if self.rank[roota] > self.rank[roota]:
SCREAMING_SNAKE_CASE__ =roota
return roota
if self.rank[roota] < self.rank[roota]:
SCREAMING_SNAKE_CASE__ =roota
return roota
if self.rank[roota] == self.rank[roota]:
self.rank[roota] += 1
SCREAMING_SNAKE_CASE__ =roota
return roota
return None
@staticmethod
def __A ( _UpperCamelCase : Union[str, Any] ) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ =graph.num_vertices
SCREAMING_SNAKE_CASE__ =Graph.UnionFind()
SCREAMING_SNAKE_CASE__ =[]
while num_components > 1:
SCREAMING_SNAKE_CASE__ ={}
for vertex in graph.get_vertices():
SCREAMING_SNAKE_CASE__ =-1
SCREAMING_SNAKE_CASE__ =graph.get_edges()
for edge in edges:
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ =edge
edges.remove((tail, head, weight) )
for edge in edges:
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ =edge
SCREAMING_SNAKE_CASE__ =union_find.find(_UpperCamelCase )
SCREAMING_SNAKE_CASE__ =union_find.find(_UpperCamelCase )
if seta != seta:
if cheap_edge[seta] == -1 or cheap_edge[seta][2] > weight:
SCREAMING_SNAKE_CASE__ =[head, tail, weight]
if cheap_edge[seta] == -1 or cheap_edge[seta][2] > weight:
SCREAMING_SNAKE_CASE__ =[head, tail, weight]
for vertex in cheap_edge:
if cheap_edge[vertex] != -1:
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ =cheap_edge[vertex]
if union_find.find(_UpperCamelCase ) != union_find.find(_UpperCamelCase ):
union_find.union(_UpperCamelCase ,_UpperCamelCase )
mst_edges.append(cheap_edge[vertex] )
SCREAMING_SNAKE_CASE__ =num_components - 1
SCREAMING_SNAKE_CASE__ =Graph.build(edges=_UpperCamelCase )
return mst
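# Usage sketch for the Boruvka MST implementation above. Graph, Graph.build,
# Graph.UnionFind, add_vertex, add_edge, find and union all appear at call
# sites in the class itself; only "boruvka" for the mangled static method is
# an assumed name.
if __name__ == "__main__":
    g = Graph.build(
        vertices=[1, 2, 3, 4],
        edges=[(1, 2, 1), (2, 3, 2), (3, 4, 1), (4, 1, 3)],
    )
    mst = Graph.boruvka(g)  # assumed name for the MST static method
    # Expected: the three cheapest edges, spanning all four vertices and
    # dropping the weight-3 edge.
    print(mst)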
| 151
| 1
|
"""simple docstring"""
from __future__ import annotations
from sys import maxsize
from typing import Generic, TypeVar
_SCREAMING_SNAKE_CASE : Optional[Any] = TypeVar('''T''')
def lowerCamelCase__ ( _lowerCamelCase : str ) -> int:
return (position - 1) // 2
def lowerCamelCase__ ( _lowerCamelCase : List[Any] ) -> int:
return (2 * position) + 1
def lowerCamelCase__ ( _lowerCamelCase : int ) -> int:
return (2 * position) + 2
class a ( Generic[T] ):
def __init__( self : Any ) -> None:
lowerCamelCase_ = []
lowerCamelCase_ = {}
lowerCamelCase_ = 0
def __len__( self : List[str] ) -> int:
return self.elements
def __repr__( self : List[str] ) -> str:
return str(self.heap )
def UpperCamelCase ( self : List[str] ) -> bool:
return self.elements == 0
def UpperCamelCase ( self : List[Any] , __SCREAMING_SNAKE_CASE : T , __SCREAMING_SNAKE_CASE : int ) -> None:
self.heap.append((elem, weight) )
lowerCamelCase_ = self.elements
self.elements += 1
self._bubble_up(UpperCamelCase_ )
def UpperCamelCase ( self : int ) -> T:
if self.elements > 1:
self._swap_nodes(0 , self.elements - 1 )
lowerCamelCase_ = self.heap.pop()
del self.position_map[elem]
self.elements -= 1
if self.elements > 0:
lowerCamelCase_ = self.heap[0]
self._bubble_down(UpperCamelCase_ )
return elem
def UpperCamelCase ( self : Dict , __SCREAMING_SNAKE_CASE : T , __SCREAMING_SNAKE_CASE : int ) -> None:
lowerCamelCase_ = self.position_map[elem]
lowerCamelCase_ = (elem, weight)
if position > 0:
lowerCamelCase_ = get_parent_position(UpperCamelCase_ )
lowerCamelCase_ = self.heap[parent_position]
if parent_weight > weight:
self._bubble_up(UpperCamelCase_ )
else:
self._bubble_down(UpperCamelCase_ )
else:
self._bubble_down(UpperCamelCase_ )
def UpperCamelCase ( self : Dict , __SCREAMING_SNAKE_CASE : T ) -> None:
lowerCamelCase_ = self.position_map[elem]
if curr_pos == 0:
return None
lowerCamelCase_ = get_parent_position(UpperCamelCase_ )
lowerCamelCase_ = self.heap[curr_pos]
lowerCamelCase_ = self.heap[parent_position]
if parent_weight > weight:
self._swap_nodes(UpperCamelCase_ , UpperCamelCase_ )
return self._bubble_up(UpperCamelCase_ )
return None
def UpperCamelCase ( self : str , __SCREAMING_SNAKE_CASE : T ) -> None:
lowerCamelCase_ = self.position_map[elem]
lowerCamelCase_ = self.heap[curr_pos]
lowerCamelCase_ = get_child_left_position(UpperCamelCase_ )
lowerCamelCase_ = get_child_right_position(UpperCamelCase_ )
if child_left_position < self.elements and child_right_position < self.elements:
lowerCamelCase_ = self.heap[child_left_position]
lowerCamelCase_ = self.heap[child_right_position]
if child_right_weight < child_left_weight and child_right_weight < weight:
self._swap_nodes(UpperCamelCase_ , UpperCamelCase_ )
return self._bubble_down(UpperCamelCase_ )
if child_left_position < self.elements:
lowerCamelCase_ = self.heap[child_left_position]
if child_left_weight < weight:
self._swap_nodes(UpperCamelCase_ , UpperCamelCase_ )
return self._bubble_down(UpperCamelCase_ )
else:
return None
if child_right_position < self.elements:
lowerCamelCase_ = self.heap[child_right_position]
if child_right_weight < weight:
self._swap_nodes(UpperCamelCase_ , UpperCamelCase_ )
return self._bubble_down(UpperCamelCase_ )
return None
def UpperCamelCase ( self : str , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : int ) -> None:
lowerCamelCase_ = self.heap[nodea_pos][0]
lowerCamelCase_ = self.heap[nodea_pos][0]
lowerCamelCase_ = (
self.heap[nodea_pos],
self.heap[nodea_pos],
)
lowerCamelCase_ = nodea_pos
lowerCamelCase_ = nodea_pos
class a ( Generic[T] ):
def __init__( self : Any ) -> None:
lowerCamelCase_ = {}
lowerCamelCase_ = 0
def __repr__( self : Tuple ) -> str:
return str(self.connections )
def __len__( self : int ) -> int:
return self.nodes
def UpperCamelCase ( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : T ) -> None:
if node not in self.connections:
lowerCamelCase_ = {}
self.nodes += 1
def UpperCamelCase ( self : Optional[Any] , __SCREAMING_SNAKE_CASE : T , __SCREAMING_SNAKE_CASE : T , __SCREAMING_SNAKE_CASE : int ) -> None:
self.add_node(UpperCamelCase_ )
self.add_node(UpperCamelCase_ )
lowerCamelCase_ = weight
lowerCamelCase_ = weight
def lowerCamelCase__ ( _lowerCamelCase : str , ) -> tuple[dict[T, int], dict[T, T | None]]:
lowerCamelCase_ = {node: maxsize for node in graph.connections}
lowerCamelCase_ = {node: None for node in graph.connections}
lowerCamelCase_ = MinPriorityQueue()
for node, weight in dist.items():
priority_queue.push(_lowercase , _lowercase )
if priority_queue.is_empty():
return dist, parent
# initialization
lowerCamelCase_ = priority_queue.extract_min()
lowerCamelCase_ = 0
for neighbour in graph.connections[node]:
if dist[neighbour] > dist[node] + graph.connections[node][neighbour]:
lowerCamelCase_ = dist[node] + graph.connections[node][neighbour]
priority_queue.update_key(_lowercase , dist[neighbour] )
lowerCamelCase_ = node
# running prim's algorithm
while not priority_queue.is_empty():
lowerCamelCase_ = priority_queue.extract_min()
for neighbour in graph.connections[node]:
if dist[neighbour] > dist[node] + graph.connections[node][neighbour]:
lowerCamelCase_ = dist[node] + graph.connections[node][neighbour]
priority_queue.update_key(_lowercase , dist[neighbour] )
lowerCamelCase_ = node
return dist, parent
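# Usage sketch for the Prim routine above. MinPriorityQueue, push, extract_min,
# update_key, add_node and graph.connections appear verbatim in the code; the
# names GraphUndirectedWeighted, add_edge and prims_algo below are assumed
# stand-ins for the mangled identifiers.
if __name__ == "__main__":
    graph = GraphUndirectedWeighted()     # assumed name for the second class
    graph.add_edge("a", "b", 3)           # assumed name for the edge method
    graph.add_edge("b", "c", 10)
    graph.add_edge("a", "c", 15)
    dist, parent = prims_algo(graph)      # assumed name for the mangled function
    # parent maps each node to its predecessor in the tree (the root maps to None)
    mst_edges = [(parent[v], v) for v in graph.connections if parent[v] is not None]
    print(mst_edges)  # expected: [('a', 'b'), ('b', 'c')] in some order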
| 704
|
"""simple docstring"""
import math
def is_prime(number: int) -> bool:
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must be an int and positive"
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or not number % 2:
        # Negatives, 0, 1 and all even numbers are not primes
        return False
    odd_numbers = range(3, int(math.sqrt(number) + 1), 2)
    return not any(not number % i for i in odd_numbers)


def next_prime(value, factor=1, **kwargs):
    value = factor * value
    first_value_val = value
    while not is_prime(value):
        value += 1 if not ("desc" in kwargs and kwargs["desc"] is True) else -1
    if value == first_value_val:
        return next_prime(value + 1, **kwargs)
    return value
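# Quick checks for the helpers above (values verified by hand):
# next_prime(14) walks 14 -> 15 -> 16 -> 17 and returns 17; next_prime(13)
# lands on 13 itself, so it recurses past it and also returns 17; passing
# desc=True searches downward instead.
if __name__ == "__main__":
    assert is_prime(17) and not is_prime(15)
    assert next_prime(14) == 17
    assert next_prime(13) == 17
    assert next_prime(14, desc=True) == 13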
| 137
| 0
|
"""simple docstring"""
from typing import Dict, List, Optional, Tuple, Union
import torch
from ...models import AutoencoderKL, TransformeraDModel
from ...schedulers import KarrasDiffusionSchedulers
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class UpperCamelCase_ (__A ):
def __init__( self : List[Any] , lowerCAmelCase_ : TransformeraDModel , lowerCAmelCase_ : AutoencoderKL , lowerCAmelCase_ : KarrasDiffusionSchedulers , lowerCAmelCase_ : Optional[Dict[int, str]] = None , ) -> List[Any]:
super().__init__()
self.register_modules(transformer=lowerCAmelCase_ , vae=lowerCAmelCase_ , scheduler=lowerCAmelCase_ )
        # create an ImageNet label -> id dictionary for easier use
UpperCAmelCase_ : str = {}
if idalabel is not None:
for key, value in idalabel.items():
for label in value.split("," ):
UpperCAmelCase_ : Union[str, Any] = int(lowerCAmelCase_ )
UpperCAmelCase_ : Optional[int] = dict(sorted(self.labels.items() ) )
def _SCREAMING_SNAKE_CASE ( self : str , lowerCAmelCase_ : Union[str, List[str]] ) -> List[int]:
if not isinstance(lowerCAmelCase_ , lowerCAmelCase_ ):
UpperCAmelCase_ : str = list(lowerCAmelCase_ )
for l in label:
if l not in self.labels:
raise ValueError(
f"""{l} does not exist. Please make sure to select one of the following labels: \n {self.labels}.""" )
return [self.labels[l] for l in label]
@torch.no_grad()
def __call__( self : List[str] , lowerCAmelCase_ : List[int] , lowerCAmelCase_ : float = 4.0 , lowerCAmelCase_ : Optional[Union[torch.Generator, List[torch.Generator]]] = None , lowerCAmelCase_ : int = 50 , lowerCAmelCase_ : Optional[str] = "pil" , lowerCAmelCase_ : bool = True , ) -> Union[ImagePipelineOutput, Tuple]:
UpperCAmelCase_ : Optional[int] = len(lowerCAmelCase_ )
UpperCAmelCase_ : int = self.transformer.config.sample_size
UpperCAmelCase_ : Optional[Any] = self.transformer.config.in_channels
UpperCAmelCase_ : Tuple = randn_tensor(
shape=(batch_size, latent_channels, latent_size, latent_size) , generator=lowerCAmelCase_ , device=self.device , dtype=self.transformer.dtype , )
UpperCAmelCase_ : str = torch.cat([latents] * 2 ) if guidance_scale > 1 else latents
UpperCAmelCase_ : List[Any] = torch.tensor(lowerCAmelCase_ , device=self.device ).reshape(-1 )
UpperCAmelCase_ : Optional[Any] = torch.tensor([1_000] * batch_size , device=self.device )
UpperCAmelCase_ : int = torch.cat([class_labels, class_null] , 0 ) if guidance_scale > 1 else class_labels
# set step values
self.scheduler.set_timesteps(lowerCAmelCase_ )
for t in self.progress_bar(self.scheduler.timesteps ):
if guidance_scale > 1:
UpperCAmelCase_ : Optional[Any] = latent_model_input[: len(lowerCAmelCase_ ) // 2]
UpperCAmelCase_ : int = torch.cat([half, half] , dim=0 )
UpperCAmelCase_ : Optional[int] = self.scheduler.scale_model_input(lowerCAmelCase_ , lowerCAmelCase_ )
UpperCAmelCase_ : Union[str, Any] = t
if not torch.is_tensor(lowerCAmelCase_ ):
# TODO: this requires sync between CPU and GPU. So try to pass timesteps as tensors if you can
# This would be a good case for the `match` statement (Python 3.10+)
UpperCAmelCase_ : List[Any] = latent_model_input.device.type == "mps"
if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ):
                    UpperCAmelCase_ : Optional[int] = torch.float32 if is_mps else torch.float64
                else:
                    UpperCAmelCase_ : int = torch.int32 if is_mps else torch.int64
UpperCAmelCase_ : Optional[int] = torch.tensor([timesteps] , dtype=lowerCAmelCase_ , device=latent_model_input.device )
elif len(timesteps.shape ) == 0:
UpperCAmelCase_ : List[Any] = timesteps[None].to(latent_model_input.device )
# broadcast to batch dimension in a way that's compatible with ONNX/Core ML
UpperCAmelCase_ : List[Any] = timesteps.expand(latent_model_input.shape[0] )
# predict noise model_output
UpperCAmelCase_ : List[Any] = self.transformer(
lowerCAmelCase_ , timestep=lowerCAmelCase_ , class_labels=lowerCAmelCase_ ).sample
# perform guidance
if guidance_scale > 1:
UpperCAmelCase_ , UpperCAmelCase_ : Optional[int] = noise_pred[:, :latent_channels], noise_pred[:, latent_channels:]
UpperCAmelCase_ , UpperCAmelCase_ : Union[str, Any] = torch.split(lowerCAmelCase_ , len(lowerCAmelCase_ ) // 2 , dim=0 )
UpperCAmelCase_ : Optional[Any] = uncond_eps + guidance_scale * (cond_eps - uncond_eps)
UpperCAmelCase_ : Tuple = torch.cat([half_eps, half_eps] , dim=0 )
UpperCAmelCase_ : Optional[Any] = torch.cat([eps, rest] , dim=1 )
# learned sigma
if self.transformer.config.out_channels // 2 == latent_channels:
UpperCAmelCase_ , UpperCAmelCase_ : List[str] = torch.split(lowerCAmelCase_ , lowerCAmelCase_ , dim=1 )
else:
UpperCAmelCase_ : Optional[Any] = noise_pred
# compute previous image: x_t -> x_t-1
UpperCAmelCase_ : Union[str, Any] = self.scheduler.step(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ).prev_sample
if guidance_scale > 1:
UpperCAmelCase_ , UpperCAmelCase_ : Optional[Any] = latent_model_input.chunk(2 , dim=0 )
else:
UpperCAmelCase_ : Optional[int] = latent_model_input
UpperCAmelCase_ : Dict = 1 / self.vae.config.scaling_factor * latents
UpperCAmelCase_ : Dict = self.vae.decode(lowerCAmelCase_ ).sample
UpperCAmelCase_ : Tuple = (samples / 2 + 0.5).clamp(0 , 1 )
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
UpperCAmelCase_ : Optional[Any] = samples.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
UpperCAmelCase_ : Optional[int] = self.numpy_to_pil(lowerCAmelCase_ )
if not return_dict:
return (samples,)
return ImagePipelineOutput(images=lowerCAmelCase_ )
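# Usage sketch. The class above mirrors diffusers' DiTPipeline (class-conditional
# transformer + VAE + scheduler); the checkpoint id and the get_label_ids call
# below follow the upstream documentation and are assumptions about this
# renamed excerpt.
import torch
from diffusers import DiTPipeline

pipe = DiTPipeline.from_pretrained("facebook/DiT-XL-2-256", torch_dtype=torch.float16)
pipe = pipe.to("cuda")
# map human-readable ImageNet class names to label ids, then sample
class_ids = pipe.get_label_ids(["white shark", "umbrella"])
images = pipe(class_labels=class_ids, num_inference_steps=25).images
images[0].save("dit_sample.png")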
| 95
|
'''simple docstring'''
def alternative_string_arrange(first_str: str, second_str: str) -> str:
    """Interleave the characters of two strings, appending any leftover tail."""
    first_str_length = len(first_str)
    second_str_length = len(second_str)
    abs_length = (
        first_str_length if first_str_length > second_str_length else second_str_length
    )
    output_list = []
    for char_count in range(abs_length):
        if char_count < first_str_length:
            output_list.append(first_str[char_count])
        if char_count < second_str_length:
            output_list.append(second_str[char_count])
    return "".join(output_list)
if __name__ == "__main__":
print(alternative_string_arrange("""AB""", """XYZ"""), end=""" """)
| 676
| 0
|
def solution(n: int = 1000) -> int:
    product = -1
    candidate = 0
    for a in range(1, n // 3):
        # Solving the two equations a**2+b**2=c**2 and a+b+c=N eliminating c
        b = (n * n - 2 * a * n) // (2 * n - 2 * a)
        c = n - a - b
        if c * c == (a * a + b * b):
            candidate = a * b * c
            if candidate >= product:
                product = candidate
    return product
if __name__ == "__main__":
print(F"{solution() = }")
| 712
|
from transformers import DistilBertTokenizer, DistilBertTokenizerFast
from transformers.testing_utils import require_tokenizers, slow
from ..bert.test_tokenization_bert import BertTokenizationTest
@require_tokenizers
class _UpperCAmelCase ( A__ ):
"""simple docstring"""
lowercase__ = DistilBertTokenizer
lowercase__ = DistilBertTokenizerFast
lowercase__ = True
@slow
def lowercase__ ( self : str ):
'''simple docstring'''
lowercase__ = DistilBertTokenizer.from_pretrained('''distilbert-base-uncased''' )
lowercase__ = tokenizer.encode('''sequence builders''', add_special_tokens=lowerCamelCase )
lowercase__ = tokenizer.encode('''multi-sequence build''', add_special_tokens=lowerCamelCase )
lowercase__ = tokenizer.build_inputs_with_special_tokens(lowerCamelCase )
lowercase__ = tokenizer.build_inputs_with_special_tokens(lowerCamelCase, lowerCamelCase )
assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [
tokenizer.sep_token_id
]
| 671
| 0
|
import warnings
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
"""nvidia/segformer-b0-finetuned-ade-512-512""": (
"""https://huggingface.co/nvidia/segformer-b0-finetuned-ade-512-512/resolve/main/config.json"""
),
# See all SegFormer models at https://huggingface.co/models?filter=segformer
}
class a__ ( snake_case ):
"""simple docstring"""
__lowerCamelCase = 'segformer'
def __init__( self , lowercase=3 , lowercase=4 , lowercase=[2, 2, 2, 2] , lowercase=[8, 4, 2, 1] , lowercase=[32, 64, 160, 256] , lowercase=[7, 3, 3, 3] , lowercase=[4, 2, 2, 2] , lowercase=[1, 2, 5, 8] , lowercase=[4, 4, 4, 4] , lowercase="gelu" , lowercase=0.0 , lowercase=0.0 , lowercase=0.1 , lowercase=0.02 , lowercase=0.1 , lowercase=1e-6 , lowercase=256 , lowercase=255 , **lowercase , ) -> List[str]:
'''simple docstring'''
super().__init__(**lowercase )
if "reshape_last_stage" in kwargs and kwargs["reshape_last_stage"] is False:
warnings.warn(
"Reshape_last_stage is set to False in this config. This argument is deprecated and will soon be"
" removed, as the behaviour will default to that of reshape_last_stage = True." , lowercase , )
A__ = num_channels
A__ = num_encoder_blocks
A__ = depths
A__ = sr_ratios
A__ = hidden_sizes
A__ = patch_sizes
A__ = strides
A__ = mlp_ratios
A__ = num_attention_heads
A__ = hidden_act
A__ = hidden_dropout_prob
A__ = attention_probs_dropout_prob
A__ = classifier_dropout_prob
A__ = initializer_range
A__ = drop_path_rate
A__ = layer_norm_eps
A__ = decoder_hidden_size
A__ = kwargs.get("reshape_last_stage" , lowercase )
A__ = semantic_loss_ignore_index
class a__ ( snake_case ):
"""simple docstring"""
__lowerCamelCase = version.parse('1.11' )
@property
def UpperCamelCase ( self ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
return OrderedDict(
[
("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
] )
@property
def UpperCamelCase ( self ) -> float:
'''simple docstring'''
return 1e-4
@property
def UpperCamelCase ( self ) -> int:
'''simple docstring'''
return 12
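# Usage sketch, assuming the upstream transformers names (SegformerConfig and
# SegformerModel) for these renamed classes:
from transformers import SegformerConfig, SegformerModel

config = SegformerConfig(num_encoder_blocks=4, depths=[2, 2, 2, 2])
model = SegformerModel(config)  # randomly initialised model built from the config
print(config.hidden_sizes)      # [32, 64, 160, 256] by default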
| 514
|
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = """▁"""
lowerCAmelCase__ = {"""vocab_file""": """sentencepiece.bpe.model""", """monolingual_vocab_file""": """dict.txt"""}
lowerCAmelCase__ = {
"""vocab_file""": {
"""vinai/bartpho-syllable""": """https://huggingface.co/vinai/bartpho-syllable/resolve/main/sentencepiece.bpe.model""",
},
"""monolingual_vocab_file""": {
"""vinai/bartpho-syllable""": """https://huggingface.co/vinai/bartpho-syllable/resolve/main/dict.txt""",
},
}
lowerCAmelCase__ = {"""vinai/bartpho-syllable""": 1_0_2_4}
class a__ ( snake_case ):
"""simple docstring"""
__lowerCamelCase = VOCAB_FILES_NAMES
__lowerCamelCase = PRETRAINED_VOCAB_FILES_MAP
__lowerCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__lowerCamelCase = ['input_ids', 'attention_mask']
def __init__( self , lowercase , lowercase , lowercase="<s>" , lowercase="</s>" , lowercase="</s>" , lowercase="<s>" , lowercase="<unk>" , lowercase="<pad>" , lowercase="<mask>" , lowercase = None , **lowercase , ) -> None:
'''simple docstring'''
A__ = AddedToken(lowercase , lstrip=lowercase , rstrip=lowercase ) if isinstance(lowercase , lowercase ) else mask_token
A__ = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=lowercase , eos_token=lowercase , unk_token=lowercase , sep_token=lowercase , cls_token=lowercase , pad_token=lowercase , mask_token=lowercase , sp_model_kwargs=self.sp_model_kwargs , **lowercase , )
A__ = vocab_file
A__ = monolingual_vocab_file
A__ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(lowercase ) )
# Load the reduced vocab
# Keep order of special tokens for backward compatibility
A__ = {}
A__ = 0
for token in [bos_token, pad_token, eos_token, unk_token, sep_token, cls_token]:
if str(lowercase ) not in self.fairseq_tokens_to_ids:
A__ = cnt
cnt += 1
with open(lowercase , "r" , encoding="utf-8" ) as f:
for line in f.readlines():
A__ = line.strip().split()[0]
A__ = len(self.fairseq_tokens_to_ids )
if str(lowercase ) not in self.fairseq_tokens_to_ids:
A__ = len(self.fairseq_tokens_to_ids )
A__ = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
def __getstate__( self ) -> List[Any]:
'''simple docstring'''
A__ = self.__dict__.copy()
A__ = None
A__ = self.sp_model.serialized_model_proto()
return state
def __setstate__( self , lowercase ) -> Any:
'''simple docstring'''
A__ = d
# for backward compatibility
if not hasattr(self , "sp_model_kwargs" ):
A__ = {}
A__ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
def UpperCamelCase ( self , lowercase , lowercase = None ) -> List[int]:
'''simple docstring'''
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
A__ = [self.cls_token_id]
A__ = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def UpperCamelCase ( self , lowercase , lowercase = None , lowercase = False ) -> List[int]:
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowercase , token_ids_a=lowercase , already_has_special_tokens=lowercase )
if token_ids_a is None:
return [1] + ([0] * len(lowercase )) + [1]
return [1] + ([0] * len(lowercase )) + [1, 1] + ([0] * len(lowercase )) + [1]
def UpperCamelCase ( self , lowercase , lowercase = None ) -> List[int]:
'''simple docstring'''
A__ = [self.sep_token_id]
A__ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def UpperCamelCase ( self ) -> Tuple:
'''simple docstring'''
return len(self.fairseq_ids_to_tokens )
def UpperCamelCase ( self ) -> Optional[int]:
'''simple docstring'''
A__ = {self.convert_ids_to_tokens(lowercase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def UpperCamelCase ( self , lowercase ) -> List[str]:
'''simple docstring'''
return self.sp_model.encode(lowercase , out_type=lowercase )
def UpperCamelCase ( self , lowercase ) -> List[str]:
'''simple docstring'''
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
else:
return self.unk_token_id
def UpperCamelCase ( self , lowercase ) -> Any:
'''simple docstring'''
return self.fairseq_ids_to_tokens[index]
def UpperCamelCase ( self , lowercase ) -> List[str]:
'''simple docstring'''
A__ = "".join(lowercase ).replace(lowercase , " " ).strip()
return out_string
def UpperCamelCase ( self , lowercase , lowercase = None ) -> Tuple[str]:
'''simple docstring'''
if not os.path.isdir(lowercase ):
logger.error(F'Vocabulary path ({save_directory}) should be a directory' )
return
A__ = os.path.join(
lowercase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
A__ = os.path.join(
lowercase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["monolingual_vocab_file"] , )
if os.path.abspath(self.vocab_file ) != os.path.abspath(lowercase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , lowercase )
elif not os.path.isfile(self.vocab_file ):
with open(lowercase , "wb" ) as fi:
A__ = self.sp_model.serialized_model_proto()
fi.write(lowercase )
if os.path.abspath(self.monolingual_vocab_file ) != os.path.abspath(
lowercase ) and os.path.isfile(self.monolingual_vocab_file ):
copyfile(self.monolingual_vocab_file , lowercase )
elif not os.path.isfile(self.monolingual_vocab_file ):
with open(lowercase , "w" , encoding="utf-8" ) as fp:
for token in self.fairseq_tokens_to_ids:
if token not in self.all_special_tokens:
fp.write(F'{str(lowercase )} \n' )
return out_vocab_file, out_monolingual_vocab_file
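# Usage sketch, assuming the upstream transformers registration (BartphoTokenizer,
# loadable via AutoTokenizer) for this renamed class; "vinai/bartpho-syllable"
# appears in the vocab maps above, and the Vietnamese sentence is illustrative.
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("vinai/bartpho-syllable")
line = "Chúng tôi là những nghiên cứu viên."
ids = tokenizer(line)["input_ids"]
print(tokenizer.decode(ids))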
| 514
| 1
|
import os
from glob import glob
import imageio
import torch
import torchvision
import wandb
from img_processing import custom_to_pil, loop_post_process, preprocess, preprocess_vqgan
from loaders import load_vqgan
from PIL import Image
from torch import nn
from transformers import CLIPModel, CLIPTokenizerFast
from utils import get_device, get_timestamp, show_pil
class lowerCamelCase_ :
'''simple docstring'''
def __init__( self : str , __lowerCamelCase : str = "cpu" , __lowerCamelCase : str = "openai/clip-vit-large-patch14" ) -> None:
A : int = device
A : Optional[Any] = CLIPTokenizerFast.from_pretrained(__lowerCamelCase )
A : Optional[int] = [0.48145466, 0.4578275, 0.40821073]
A : Tuple = [0.26862954, 0.26130258, 0.27577711]
A : List[Any] = torchvision.transforms.Normalize(self.image_mean , self.image_std )
A : int = torchvision.transforms.Resize(2_24 )
A : Optional[int] = torchvision.transforms.CenterCrop(2_24 )
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] , __lowerCamelCase : Optional[int] ) -> str:
A : Tuple = self.resize(__lowerCamelCase )
A : Dict = self.center_crop(__lowerCamelCase )
A : Any = self.normalize(__lowerCamelCase )
return images
def __call__( self : List[str] , __lowerCamelCase : int=None , __lowerCamelCase : int=None , **__lowerCamelCase : Optional[int] ) -> Union[str, Any]:
A : Any = self.tokenizer(text=__lowerCamelCase , **__lowerCamelCase )
A : Optional[Any] = self.preprocess_img(__lowerCamelCase )
A : str = {key: value.to(self.device ) for (key, value) in encoding.items()}
return encoding
class lowerCamelCase_ ( nn.Module ):
'''simple docstring'''
def __init__( self : Optional[Any] , __lowerCamelCase : List[str]=10 , __lowerCamelCase : Tuple=0.01 , __lowerCamelCase : Dict=None , __lowerCamelCase : List[Any]=None , __lowerCamelCase : str=None , __lowerCamelCase : str=None , __lowerCamelCase : List[str]=None , __lowerCamelCase : int=None , __lowerCamelCase : Dict=False , __lowerCamelCase : Optional[Any]=True , __lowerCamelCase : str="image" , __lowerCamelCase : Optional[Any]=True , __lowerCamelCase : List[str]=False , __lowerCamelCase : str=False , __lowerCamelCase : List[str]=False , ) -> None:
super().__init__()
A : Optional[Any] = None
A : str = device if device else get_device()
if vqgan:
A : List[str] = vqgan
else:
A : Dict = load_vqgan(self.device , conf_path=__lowerCamelCase , ckpt_path=__lowerCamelCase )
self.vqgan.eval()
if clip:
A : Optional[Any] = clip
else:
A : Dict = CLIPModel.from_pretrained("openai/clip-vit-base-patch32" )
self.clip.to(self.device )
A : Optional[Any] = ProcessorGradientFlow(device=self.device )
A : Dict = iterations
A : Tuple = lr
A : Tuple = log
A : Optional[int] = make_grid
A : str = return_val
A : List[Any] = quantize
A : str = self.vqgan.decoder.z_shape
def SCREAMING_SNAKE_CASE__ ( self : int , __lowerCamelCase : Optional[Any]=None , __lowerCamelCase : Dict=None , __lowerCamelCase : int=5 , __lowerCamelCase : str=True ) -> Union[str, Any]:
A : Optional[Any] = []
if output_path is None:
A : List[Any] = "./animation.gif"
if input_path is None:
A : str = self.save_path
A : Optional[int] = sorted(glob(input_path + "/*" ) )
if not len(__lowerCamelCase ):
raise ValueError(
"No images found in save path, aborting (did you pass save_intermediate=True to the generate"
" function?)" )
if len(__lowerCamelCase ) == 1:
print("Only one image found in save path, (did you pass save_intermediate=True to the generate function?)" )
A : Union[str, Any] = total_duration / len(__lowerCamelCase )
A : Optional[Any] = [frame_duration] * len(__lowerCamelCase )
if extend_frames:
A : int = 1.5
A : int = 3
for file_name in paths:
if file_name.endswith(".png" ):
images.append(imageio.imread(__lowerCamelCase ) )
imageio.mimsave(__lowerCamelCase , __lowerCamelCase , duration=__lowerCamelCase )
print(F"""gif saved to {output_path}""" )
def SCREAMING_SNAKE_CASE__ ( self : int , __lowerCamelCase : List[str]=None , __lowerCamelCase : Dict=None ) -> Union[str, Any]:
if not (path or img):
raise ValueError("Input either path or tensor" )
if img is not None:
raise NotImplementedError
A : List[str] = preprocess(Image.open(__lowerCamelCase ) , target_image_size=2_56 ).to(self.device )
A : str = preprocess_vqgan(__lowerCamelCase )
A , *A : Optional[Any] = self.vqgan.encode(__lowerCamelCase )
return z
def SCREAMING_SNAKE_CASE__ ( self : Dict , __lowerCamelCase : Optional[Any] ) -> Union[str, Any]:
A : int = self.latent.detach().requires_grad_()
A : List[str] = base_latent + transform_vector
if self.quantize:
A , *A : Union[str, Any] = self.vqgan.quantize(__lowerCamelCase )
else:
A : str = trans_latent
return self.vqgan.decode(__lowerCamelCase )
def SCREAMING_SNAKE_CASE__ ( self : List[str] , __lowerCamelCase : List[Any] , __lowerCamelCase : Optional[Any] , __lowerCamelCase : List[Any]=None ) -> Dict:
A : Union[str, Any] = self.clip_preprocessor(text=__lowerCamelCase , images=__lowerCamelCase , return_tensors="pt" , padding=__lowerCamelCase )
A : int = self.clip(**__lowerCamelCase )
A : Optional[int] = clip_outputs.logits_per_image
if weights is not None:
A : Any = similarity_logits * weights
return similarity_logits.sum()
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] , __lowerCamelCase : Any , __lowerCamelCase : Optional[Any] , __lowerCamelCase : Any ) -> str:
A : Any = self._get_clip_similarity(pos_prompts["prompts"] , __lowerCamelCase , weights=(1 / pos_prompts["weights"]) )
if neg_prompts:
A : List[Any] = self._get_clip_similarity(neg_prompts["prompts"] , __lowerCamelCase , weights=neg_prompts["weights"] )
else:
A : Tuple = torch.tensor([1] , device=self.device )
A : List[Any] = -torch.log(__lowerCamelCase ) + torch.log(__lowerCamelCase )
return loss
def SCREAMING_SNAKE_CASE__ ( self : Any , __lowerCamelCase : str , __lowerCamelCase : Optional[int] , __lowerCamelCase : Dict ) -> str:
A : Any = torch.randn_like(self.latent , requires_grad=__lowerCamelCase , device=self.device )
A : Optional[int] = torch.optim.Adam([vector] , lr=self.lr )
for i in range(self.iterations ):
optim.zero_grad()
A : List[str] = self._add_vector(__lowerCamelCase )
A : List[Any] = loop_post_process(__lowerCamelCase )
A : Any = self._get_CLIP_loss(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
print("CLIP loss" , __lowerCamelCase )
if self.log:
wandb.log({"CLIP Loss": clip_loss} )
clip_loss.backward(retain_graph=__lowerCamelCase )
optim.step()
if self.return_val == "image":
yield custom_to_pil(transformed_img[0] )
else:
yield vector
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] , __lowerCamelCase : Any , __lowerCamelCase : Optional[int] , __lowerCamelCase : Union[str, Any] ) -> Optional[Any]:
wandb.init(reinit=__lowerCamelCase , project="face-editor" )
wandb.config.update({"Positive Prompts": positive_prompts} )
wandb.config.update({"Negative Prompts": negative_prompts} )
wandb.config.update({"lr": self.lr, "iterations": self.iterations} )
if image_path:
A : List[str] = Image.open(__lowerCamelCase )
A : Optional[Any] = image.resize((2_56, 2_56) )
wandb.log("Original Image" , wandb.Image(__lowerCamelCase ) )
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] , __lowerCamelCase : Optional[Any] ) -> List[Any]:
if not prompts:
return []
A : Optional[Any] = []
A : int = []
if isinstance(__lowerCamelCase , __lowerCamelCase ):
A : int = [prompt.strip() for prompt in prompts.split("|" )]
for prompt in prompts:
if isinstance(__lowerCamelCase , (tuple, list) ):
A : List[Any] = prompt[0]
A : Optional[Any] = float(prompt[1] )
elif ":" in prompt:
A , A : Optional[Any] = prompt.split(":" )
A : Union[str, Any] = float(__lowerCamelCase )
else:
A : Tuple = prompt
A : Optional[int] = 1.0
processed_prompts.append(__lowerCamelCase )
weights.append(__lowerCamelCase )
return {
"prompts": processed_prompts,
"weights": torch.tensor(__lowerCamelCase , device=self.device ),
}
def SCREAMING_SNAKE_CASE__ ( self : Tuple , __lowerCamelCase : List[Any] , __lowerCamelCase : List[Any]=None , __lowerCamelCase : Optional[Any]=None , __lowerCamelCase : Union[str, Any]=True , __lowerCamelCase : List[Any]=False , __lowerCamelCase : int=True , __lowerCamelCase : List[str]=True , __lowerCamelCase : Optional[int]=None , ) -> Optional[int]:
if image_path:
A : str = self._get_latent(__lowerCamelCase )
else:
A : List[Any] = torch.randn(self.latent_dim , device=self.device )
if self.log:
self._init_logging(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
assert pos_prompts, "You must provide at least one positive prompt."
A : List[str] = self.process_prompts(__lowerCamelCase )
A : Dict = self.process_prompts(__lowerCamelCase )
if save_final and save_path is None:
A : Any = os.path.join("./outputs/" , "_".join(pos_prompts["prompts"] ) )
if not os.path.exists(__lowerCamelCase ):
os.makedirs(__lowerCamelCase )
else:
A : List[Any] = save_path + "_" + get_timestamp()
os.makedirs(__lowerCamelCase )
A : Dict = save_path
A : List[str] = self.vqgan.decode(self.latent )[0]
if show_intermediate:
print("Original Image" )
show_pil(custom_to_pil(__lowerCamelCase ) )
A : Union[str, Any] = loop_post_process(__lowerCamelCase )
for iter, transformed_img in enumerate(self._optimize_CLIP(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) ):
if show_intermediate:
show_pil(__lowerCamelCase )
if save_intermediate:
transformed_img.save(os.path.join(self.save_path , F"""iter_{iter:03d}.png""" ) )
if self.log:
wandb.log({"Image": wandb.Image(__lowerCamelCase )} )
if show_final:
show_pil(__lowerCamelCase )
if save_final:
transformed_img.save(os.path.join(self.save_path , F"""iter_{iter:03d}_final.png""" ) )
| 17
|
from sklearn.metrics import recall_score
import datasets
__SCREAMING_SNAKE_CASE = """
Recall is the fraction of the positive examples that were correctly labeled by the model as positive. It can be computed with the equation:
Recall = TP / (TP + FN)
Where TP is the true positives and FN is the false negatives.
"""
__SCREAMING_SNAKE_CASE = """
Args:
- **predictions** (`list` of `int`): The predicted labels.
- **references** (`list` of `int`): The ground truth labels.
- **labels** (`list` of `int`): The set of labels to include when `average` is not set to `binary`, and their order when average is `None`. Labels present in the data can be excluded in this input, for example to calculate a multiclass average ignoring a majority negative class, while labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in y_true and y_pred are used in sorted order. Defaults to None.
- **pos_label** (`int`): The class label to use as the 'positive class' when calculating the recall. Defaults to `1`.
- **average** (`string`): This parameter is required for multiclass/multilabel targets. If None, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `'binary'`.
- `'binary'`: Only report results for the class specified by `pos_label`. This is applicable only if the target labels and predictions are binary.
- `'micro'`: Calculate metrics globally by counting the total true positives, false negatives, and false positives.
- `'macro'`: Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.
- `'weighted'`: Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `'macro'` to account for label imbalance. Note that it can result in an F-score that is not between precision and recall.
- `'samples'`: Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).
 - **sample_weight** (`list` of `float`): Sample weights. Defaults to `None`.
 - **zero_division** (`"warn"`, `0` or `1`): Sets the value to return when there is a zero division. Defaults to `'warn'`.
- `'warn'`: If there is a zero division, the return value is `0`, but warnings are also raised.
- `0`: If there is a zero division, the return value is `0`.
- `1`: If there is a zero division, the return value is `1`.
Returns:
- **recall** (`float`, or `array` of `float`): Either the general recall score, or the recall scores for individual classes, depending on the values input to `labels` and `average`. Minimum possible value is 0. Maximum possible value is 1. A higher recall means that more of the positive examples have been labeled correctly. Therefore, a higher recall is generally considered better.
Examples:
Example 1-A simple example with some errors
>>> recall_metric = datasets.load_metric('recall')
>>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1])
>>> print(results)
{'recall': 0.6666666666666666}
Example 2-The same example as Example 1, but with `pos_label=0` instead of the default `pos_label=1`.
>>> recall_metric = datasets.load_metric('recall')
>>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], pos_label=0)
>>> print(results)
{'recall': 0.5}
Example 3-The same example as Example 1, but with `sample_weight` included.
>>> recall_metric = datasets.load_metric('recall')
>>> sample_weight = [0.9, 0.2, 0.9, 0.3, 0.8]
>>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], sample_weight=sample_weight)
>>> print(results)
{'recall': 0.55}
Example 4-A multiclass example, using different averages.
>>> recall_metric = datasets.load_metric('recall')
>>> predictions = [0, 2, 1, 0, 0, 1]
>>> references = [0, 1, 2, 0, 1, 2]
>>> results = recall_metric.compute(predictions=predictions, references=references, average='macro')
>>> print(results)
{'recall': 0.3333333333333333}
>>> results = recall_metric.compute(predictions=predictions, references=references, average='micro')
>>> print(results)
{'recall': 0.3333333333333333}
>>> results = recall_metric.compute(predictions=predictions, references=references, average='weighted')
>>> print(results)
{'recall': 0.3333333333333333}
>>> results = recall_metric.compute(predictions=predictions, references=references, average=None)
>>> print(results)
{'recall': array([1., 0., 0.])}
"""
__SCREAMING_SNAKE_CASE = """
@article{scikit-learn, title={Scikit-learn: Machine Learning in {P}ython}, author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V. and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P. and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.}, journal={Journal of Machine Learning Research}, volume={12}, pages={2825--2830}, year={2011}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION )
class lowerCamelCase_ ( datasets.Metric ):
'''simple docstring'''
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ) -> str:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Sequence(datasets.Value("int32" ) ),
"references": datasets.Sequence(datasets.Value("int32" ) ),
}
if self.config_name == "multilabel"
else {
"predictions": datasets.Value("int32" ),
"references": datasets.Value("int32" ),
} ) , reference_urls=["https://scikit-learn.org/stable/modules/generated/sklearn.metrics.recall_score.html"] , )
def SCREAMING_SNAKE_CASE__ ( self : Any , __lowerCamelCase : int , __lowerCamelCase : Dict , __lowerCamelCase : str=None , __lowerCamelCase : List[Any]=1 , __lowerCamelCase : Tuple="binary" , __lowerCamelCase : Tuple=None , __lowerCamelCase : Tuple="warn" , ) -> Optional[Any]:
A : str = recall_score(
__lowerCamelCase , __lowerCamelCase , labels=__lowerCamelCase , pos_label=__lowerCamelCase , average=__lowerCamelCase , sample_weight=__lowerCamelCase , zero_division=__lowerCamelCase , )
return {"recall": float(__lowerCamelCase ) if score.size == 1 else score}
| 17
| 1
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
__A = {
'configuration_owlvit': [
'OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP',
'OwlViTConfig',
'OwlViTOnnxConfig',
'OwlViTTextConfig',
'OwlViTVisionConfig',
],
'processing_owlvit': ['OwlViTProcessor'],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A = ['OwlViTFeatureExtractor']
__A = ['OwlViTImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A = [
'OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'OwlViTModel',
'OwlViTPreTrainedModel',
'OwlViTTextModel',
'OwlViTVisionModel',
'OwlViTForObjectDetection',
]
if TYPE_CHECKING:
from .configuration_owlvit import (
OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
OwlViTConfig,
OwlViTOnnxConfig,
OwlViTTextConfig,
OwlViTVisionConfig,
)
from .processing_owlvit import OwlViTProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_owlvit import OwlViTFeatureExtractor
from .image_processing_owlvit import OwlViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_owlvit import (
OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
OwlViTForObjectDetection,
OwlViTModel,
OwlViTPreTrainedModel,
OwlViTTextModel,
OwlViTVisionModel,
)
else:
import sys
__A = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 593
|
def circle_sort(collection: list) -> list:
    if len(collection) < 2:
        return collection

    def circle_sort_util(collection: list, low: int, high: int) -> bool:
        swapped = False
        if low == high:
            return swapped
        left = low
        right = high
        while left < right:
            if collection[left] > collection[right]:
                collection[left], collection[right] = (
                    collection[right],
                    collection[left],
                )
                swapped = True
            left += 1
            right -= 1
        if left == right and collection[left] > collection[right + 1]:
            collection[left], collection[right + 1] = (
                collection[right + 1],
                collection[left],
            )
            swapped = True
        mid = low + int((high - low) / 2)
        left_swap = circle_sort_util(collection, low, mid)
        right_swap = circle_sort_util(collection, mid + 1, high)
        return swapped or left_swap or right_swap

    is_not_sorted = True
    while is_not_sorted is True:
        is_not_sorted = circle_sort_util(collection, 0, len(collection) - 1)
    return collection
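# Deterministic checks for circle_sort (it sorts in place and returns the list):
assert circle_sort([0, 5, 3, 2, 2]) == [0, 2, 2, 3, 5]
assert circle_sort([]) == []
assert circle_sort([-2, -5, -45]) == [-45, -5, -2]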
if __name__ == "__main__":
__lowerCAmelCase : Union[str, Any] = input('Enter numbers separated by a comma:\n').strip()
__lowerCAmelCase : Tuple = [int(item) for item in user_input.split(',')]
print(circle_sort(unsorted))
| 529
| 0
|
'''simple docstring'''
import os
import shutil
from pathlib import Path
from typing import Optional, Union
import numpy as np
from huggingface_hub import hf_hub_download
from ..utils import ONNX_EXTERNAL_WEIGHTS_NAME, ONNX_WEIGHTS_NAME, is_onnx_available, logging
if is_onnx_available():
import onnxruntime as ort
_UpperCamelCase : str = logging.get_logger(__name__)
_UpperCamelCase : Optional[int] = {
"tensor(bool)": np.bool_,
"tensor(int8)": np.inta,
"tensor(uint8)": np.uinta,
"tensor(int16)": np.intaa,
"tensor(uint16)": np.uintaa,
"tensor(int32)": np.intaa,
"tensor(uint32)": np.uintaa,
"tensor(int64)": np.intaa,
"tensor(uint64)": np.uintaa,
"tensor(float16)": np.floataa,
"tensor(float)": np.floataa,
"tensor(double)": np.floataa,
}
class _snake_case :
def __init__( self , _SCREAMING_SNAKE_CASE=None , **_SCREAMING_SNAKE_CASE ):
'''simple docstring'''
logger.info('`diffusers.OnnxRuntimeModel` is experimental and might change in the future.' )
lowerCAmelCase = model
lowerCAmelCase = kwargs.get('model_save_dir' , _SCREAMING_SNAKE_CASE )
lowerCAmelCase = kwargs.get('latest_model_name' , _SCREAMING_SNAKE_CASE )
def __call__( self , **_SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowerCAmelCase = {k: np.array(_SCREAMING_SNAKE_CASE ) for k, v in kwargs.items()}
return self.model.run(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
@staticmethod
def _SCREAMING_SNAKE_CASE ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None ):
'''simple docstring'''
if provider is None:
logger.info('No onnxruntime provider specified, using CPUExecutionProvider' )
lowerCAmelCase = 'CPUExecutionProvider'
return ort.InferenceSession(_SCREAMING_SNAKE_CASE , providers=[provider] , sess_options=_SCREAMING_SNAKE_CASE )
def _SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None , **_SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowerCAmelCase = file_name if file_name is not None else ONNX_WEIGHTS_NAME
lowerCAmelCase = self.model_save_dir.joinpath(self.latest_model_name )
lowerCAmelCase = Path(_SCREAMING_SNAKE_CASE ).joinpath(_SCREAMING_SNAKE_CASE )
try:
shutil.copyfile(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
except shutil.SameFileError:
pass
# copy external weights (for models >2GB)
lowerCAmelCase = self.model_save_dir.joinpath(_SCREAMING_SNAKE_CASE )
if src_path.exists():
lowerCAmelCase = Path(_SCREAMING_SNAKE_CASE ).joinpath(_SCREAMING_SNAKE_CASE )
try:
shutil.copyfile(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
except shutil.SameFileError:
pass
def _SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE , ):
'''simple docstring'''
if os.path.isfile(_SCREAMING_SNAKE_CASE ):
logger.error(F'Provided path ({save_directory}) should be a directory, not a file' )
return
os.makedirs(_SCREAMING_SNAKE_CASE , exist_ok=_SCREAMING_SNAKE_CASE )
# saving model weights/files
self._save_pretrained(_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = False , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , **_SCREAMING_SNAKE_CASE , ):
'''simple docstring'''
lowerCAmelCase = file_name if file_name is not None else ONNX_WEIGHTS_NAME
# load model from local directory
if os.path.isdir(_SCREAMING_SNAKE_CASE ):
lowerCAmelCase = OnnxRuntimeModel.load_model(
os.path.join(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) , provider=_SCREAMING_SNAKE_CASE , sess_options=_SCREAMING_SNAKE_CASE )
lowerCAmelCase = Path(_SCREAMING_SNAKE_CASE )
# load model from hub
else:
# download model
lowerCAmelCase = hf_hub_download(
repo_id=_SCREAMING_SNAKE_CASE , filename=_SCREAMING_SNAKE_CASE , use_auth_token=_SCREAMING_SNAKE_CASE , revision=_SCREAMING_SNAKE_CASE , cache_dir=_SCREAMING_SNAKE_CASE , force_download=_SCREAMING_SNAKE_CASE , )
lowerCAmelCase = Path(_SCREAMING_SNAKE_CASE ).parent
lowerCAmelCase = Path(_SCREAMING_SNAKE_CASE ).name
lowerCAmelCase = OnnxRuntimeModel.load_model(_SCREAMING_SNAKE_CASE , provider=_SCREAMING_SNAKE_CASE , sess_options=_SCREAMING_SNAKE_CASE )
return cls(model=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = True , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , **_SCREAMING_SNAKE_CASE , ):
'''simple docstring'''
lowerCAmelCase = None
if len(str(_SCREAMING_SNAKE_CASE ).split('@' ) ) == 2:
lowerCAmelCase , lowerCAmelCase = model_id.split('@' )
return cls._from_pretrained(
model_id=_SCREAMING_SNAKE_CASE , revision=_SCREAMING_SNAKE_CASE , cache_dir=_SCREAMING_SNAKE_CASE , force_download=_SCREAMING_SNAKE_CASE , use_auth_token=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE , )
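# Usage sketch for the ORT wrapper above. OnnxRuntimeModel and load_model appear
# verbatim at call sites in the class; "from_pretrained" for the mangled public
# classmethod, the local path, and the input name below are assumptions.
import numpy as np

ort_model = OnnxRuntimeModel.from_pretrained("./onnx_model_dir", file_name="model.onnx")
# __call__ converts the keyword arguments to numpy arrays and runs the session
outputs = ort_model(input_ids=np.zeros((1, 8), dtype=np.int64))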
| 514
|
'''simple docstring'''
import uuid
from typing import Any, Dict, List, Optional, Union
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
_UpperCamelCase : Union[str, Any] = logging.get_logger(__name__)
class _snake_case :
def __init__( self , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None ):
'''simple docstring'''
if not conversation_id:
lowerCAmelCase = uuid.uuida()
if past_user_inputs is None:
lowerCAmelCase = []
if generated_responses is None:
lowerCAmelCase = []
lowerCAmelCase = conversation_id
lowerCAmelCase = past_user_inputs
lowerCAmelCase = generated_responses
lowerCAmelCase = text
def __eq__( self , _SCREAMING_SNAKE_CASE ):
'''simple docstring'''
if not isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
return False
if self.uuid == other.uuid:
return True
return (
self.new_user_input == other.new_user_input
and self.past_user_inputs == other.past_user_inputs
and self.generated_responses == other.generated_responses
)
def _SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = False ):
'''simple docstring'''
if self.new_user_input:
if overwrite:
logger.warning(
F'User input added while unprocessed input was existing: "{self.new_user_input}" was overwritten '
F'with: "{text}".' )
lowerCAmelCase = text
else:
logger.warning(
F'User input added while unprocessed input was existing: "{self.new_user_input}" new input '
F'ignored: "{text}". Set `overwrite` to True to overwrite unprocessed user input' )
else:
lowerCAmelCase = text
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
if self.new_user_input:
self.past_user_inputs.append(self.new_user_input )
lowerCAmelCase = None
def _SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE ):
'''simple docstring'''
self.generated_responses.append(_SCREAMING_SNAKE_CASE )
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
for user_input, generated_response in zip(self.past_user_inputs , self.generated_responses ):
yield True, user_input
yield False, generated_response
if self.new_user_input:
yield True, self.new_user_input
def __repr__( self ):
'''simple docstring'''
lowerCAmelCase = F'Conversation id: {self.uuid} \n'
for is_user, text in self.iter_texts():
lowerCAmelCase = 'user' if is_user else 'bot'
output += F'{name} >> {text} \n'
return output
@add_end_docstrings(
a_ , R'''
min_length_for_response (`int`, *optional*, defaults to 32):
The minimum length (in number of tokens) for a response.
minimum_tokens (`int`, *optional*, defaults to 10):
The minimum length of tokens to leave for a response.
''' , )
class _snake_case ( a_ ):
def __init__( self , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ):
'''simple docstring'''
super().__init__(*_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
if self.tokenizer.pad_token_id is None:
lowerCAmelCase = self.tokenizer.eos_token
def _SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , **_SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowerCAmelCase = {}
lowerCAmelCase = {}
lowerCAmelCase = {}
if min_length_for_response is not None:
lowerCAmelCase = min_length_for_response
if minimum_tokens is not None:
lowerCAmelCase = minimum_tokens
if "max_length" in generate_kwargs:
lowerCAmelCase = generate_kwargs['max_length']
# self.max_length = generate_kwargs.get("max_length", self.model.config.max_length)
if clean_up_tokenization_spaces is not None:
lowerCAmelCase = clean_up_tokenization_spaces
if generate_kwargs:
forward_params.update(_SCREAMING_SNAKE_CASE )
return preprocess_params, forward_params, postprocess_params
def __call__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=0 , **_SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowerCAmelCase = super().__call__(_SCREAMING_SNAKE_CASE , num_workers=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) and len(_SCREAMING_SNAKE_CASE ) == 1:
return outputs[0]
return outputs
def _SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=32 ):
'''simple docstring'''
if not isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
            raise ValueError('ConversationalPipeline expects Conversation as inputs' )
if conversation.new_user_input is None:
raise ValueError(
                F'Conversation with UUID {conversation.uuid} does not contain new user input to process. '
'Add user inputs with the conversation\'s `add_user_input` method' )
if hasattr(self.tokenizer , '_build_conversation_input_ids' ):
lowerCAmelCase = self.tokenizer._build_conversation_input_ids(_SCREAMING_SNAKE_CASE )
else:
# If the tokenizer cannot handle conversations, we default to only the old version
lowerCAmelCase = self._legacy_parse_and_tokenize(_SCREAMING_SNAKE_CASE )
if self.framework == "pt":
lowerCAmelCase = torch.LongTensor([input_ids] )
elif self.framework == "tf":
lowerCAmelCase = tf.constant([input_ids] )
return {"input_ids": input_ids, "conversation": conversation}
def _SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=10 , **_SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowerCAmelCase = generate_kwargs.get('max_length' , self.model.config.max_length )
lowerCAmelCase = model_inputs['input_ids'].shape[1]
if max_length - minimum_tokens < n:
            logger.warning(F'Conversation input is too long ({n}), trimming it to ({max_length} - {minimum_tokens})' )
lowerCAmelCase = max_length - minimum_tokens
lowerCAmelCase = model_inputs['input_ids'][:, -trim:]
if "attention_mask" in model_inputs:
lowerCAmelCase = model_inputs['attention_mask'][:, -trim:]
lowerCAmelCase = model_inputs.pop('conversation' )
lowerCAmelCase = max_length
lowerCAmelCase = self.model.generate(**_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
if self.model.config.is_encoder_decoder:
lowerCAmelCase = 1
else:
lowerCAmelCase = n
return {"output_ids": output_ids[:, start_position:], "conversation": conversation}
def _SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=True ):
'''simple docstring'''
lowerCAmelCase = model_outputs['output_ids']
lowerCAmelCase = self.tokenizer.decode(
output_ids[0] , skip_special_tokens=_SCREAMING_SNAKE_CASE , clean_up_tokenization_spaces=_SCREAMING_SNAKE_CASE , )
lowerCAmelCase = model_outputs['conversation']
conversation.mark_processed()
conversation.append_response(_SCREAMING_SNAKE_CASE )
return conversation
def _SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowerCAmelCase = self.tokenizer.eos_token_id
lowerCAmelCase = []
for is_user, text in conversation.iter_texts():
if eos_token_id is not None:
input_ids.extend(self.tokenizer.encode(_SCREAMING_SNAKE_CASE , add_special_tokens=_SCREAMING_SNAKE_CASE ) + [eos_token_id] )
else:
input_ids.extend(self.tokenizer.encode(_SCREAMING_SNAKE_CASE , add_special_tokens=_SCREAMING_SNAKE_CASE ) )
if len(_SCREAMING_SNAKE_CASE ) > self.tokenizer.model_max_length:
lowerCAmelCase = input_ids[-self.tokenizer.model_max_length :]
return input_ids
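# Usage sketch, following the documented transformers API this excerpt mirrors
# (the ValueError above confirms Conversation as the expected input type); the
# DialoGPT checkpoint is illustrative.
from transformers import Conversation, pipeline

chatbot = pipeline("conversational", model="microsoft/DialoGPT-medium")
conversation = Conversation("What is the best way to learn Python?")
conversation = chatbot(conversation)
print(conversation.generated_responses[-1])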
| 514
| 1
|
import torch
from diffusers import DDIMParallelScheduler
from .test_schedulers import SchedulerCommonTest
class DDIMParallelSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DDIMParallelScheduler,)
    forward_default_kwargs = (("eta", 0.0), ("num_inference_steps", 50))

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "clip_sample": True,
        }
        config.update(**kwargs)
        return config

    def full_loop(self, **config):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**config)
        scheduler = scheduler_class(**scheduler_config)
        num_inference_steps, eta = 10, 0.0
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)
        for t in scheduler.timesteps:
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample, eta).prev_sample
        return sample
    def test_timesteps(self):
        for timesteps in [100, 500, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_steps_offset(self):
        for steps_offset in [0, 1]:
            self.check_over_configs(steps_offset=steps_offset)
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(steps_offset=1)
        scheduler = scheduler_class(**scheduler_config)
        scheduler.set_timesteps(5)
        assert torch.equal(scheduler.timesteps, torch.LongTensor([801, 601, 401, 201, 1]))

    def test_betas(self):
        for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1], [0.002, 0.02, 0.2, 2]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_clip_sample(self):
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample)

    def test_timestep_spacing(self):
        for timestep_spacing in ["trailing", "leading"]:
            self.check_over_configs(timestep_spacing=timestep_spacing)

    def test_rescale_betas_zero_snr(self):
        for rescale_betas_zero_snr in [True, False]:
            self.check_over_configs(rescale_betas_zero_snr=rescale_betas_zero_snr)

    def test_thresholding(self):
        self.check_over_configs(thresholding=False)
        for threshold in [0.5, 1.0, 2.0]:
            for prediction_type in ["epsilon", "v_prediction"]:
                self.check_over_configs(
                    thresholding=True, prediction_type=prediction_type, sample_max_value=threshold,
                )

    def test_time_indices(self):
        for t in [1, 10, 49]:
            self.check_over_forward(time_step=t)

    def test_inference_steps(self):
        for t, num_inference_steps in zip([1, 10, 50], [10, 50, 500]):
            self.check_over_forward(time_step=t, num_inference_steps=num_inference_steps)

    def test_eta(self):
        for t, eta in zip([1, 10, 49], [0.0, 0.5, 1.0]):
            self.check_over_forward(time_step=t, eta=eta)
    def test_variance(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        assert torch.sum(torch.abs(scheduler._get_variance(0, 0) - 0.0)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(420, 400) - 0.14771)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(980, 960) - 0.32460)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(0, 0) - 0.0)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(487, 486) - 0.00979)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(999, 998) - 0.02)) < 1e-5
    def test_batch_step_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        num_inference_steps, eta = 10, 0.0
        scheduler.set_timesteps(num_inference_steps)
        model = self.dummy_model()
        sample1 = self.dummy_sample_deter
        sample2 = self.dummy_sample_deter + 0.1
        sample3 = self.dummy_sample_deter - 0.1
        per_sample_batch = sample1.shape[0]
        samples = torch.stack([sample1, sample2, sample3], dim=0)
        timesteps = torch.arange(num_inference_steps)[0:3, None].repeat(1, per_sample_batch)
        residual = model(samples.flatten(0, 1), timesteps.flatten(0, 1))
        pred_prev_sample = scheduler.batch_step_no_noise(residual, timesteps.flatten(0, 1), samples.flatten(0, 1), eta)
        result_sum = torch.sum(torch.abs(pred_prev_sample))
        result_mean = torch.mean(torch.abs(pred_prev_sample))
        assert abs(result_sum.item() - 1147.7904) < 1e-2
        assert abs(result_mean.item() - 0.4982) < 1e-3
    def test_full_loop_no_noise(self):
        sample = self.full_loop()
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_sum.item() - 172.0067) < 1e-2
        assert abs(result_mean.item() - 0.223967) < 1e-3

    def test_full_loop_with_v_prediction(self):
        sample = self.full_loop(prediction_type="v_prediction")
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_sum.item() - 52.5302) < 1e-2
        assert abs(result_mean.item() - 0.0684) < 1e-3

    def test_full_loop_with_set_alpha_to_one(self):
        # set_alpha_to_one=True fixes the "previous" alpha product of the final step to 1
        sample = self.full_loop(set_alpha_to_one=True, beta_start=0.01)
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_sum.item() - 149.8295) < 1e-2
        assert abs(result_mean.item() - 0.1951) < 1e-3

    def test_full_loop_with_no_set_alpha_to_one(self):
        # set_alpha_to_one=False reuses the alpha product at step 0 for the final step
        sample = self.full_loop(set_alpha_to_one=False, beta_start=0.01)
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_sum.item() - 149.0784) < 1e-2
        assert abs(result_mean.item() - 0.1941) < 1e-3
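
# A minimal sketch of the batched API the tests above pin down numerically:
# batch_step_no_noise denoises several samples, each at its own timestep, in
# one call (assumes diffusers is installed; the shapes are toy stand-ins).
import torch
from diffusers import DDIMParallelScheduler

scheduler = DDIMParallelScheduler(num_train_timesteps=1000)
scheduler.set_timesteps(10)

sample = torch.randn(3, 4, 8, 8)        # three independent latents
model_output = torch.randn(3, 4, 8, 8)  # pretend noise predictions
timesteps = scheduler.timesteps[:3]     # a different timestep per latent

prev = scheduler.batch_step_no_noise(model_output, timesteps, sample, 0.0)
print(prev.shape)  # torch.Size([3, 4, 8, 8])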
from statistics import mean, stdev


def normalization(data: list, ndigits: int = 3) -> list:
    x_min = min(data)
    x_max = max(data)
    # normalize data: rescale every value into the range [0, 1]
    return [round((x - x_min) / (x_max - x_min), ndigits) for x in data]


def standardization(data: list, ndigits: int = 3) -> list:
    mu = mean(data)
    sigma = stdev(data)
    # standardize data: zero mean, unit standard deviation
    return [round((x - mu) / sigma, ndigits) for x in data]
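
# A quick usage check of the two rescalings above on a toy list. Note that
# statistics.stdev is the *sample* standard deviation, so the standardized
# values use an n-1 denominator.
data = [2, 4, 6, 8, 10]
print(normalization(data))    # [0.0, 0.25, 0.5, 0.75, 1.0]
print(standardization(data))  # [-1.265, -0.632, 0.0, 0.632, 1.265]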
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

BERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'bert-base-uncased': 'https://huggingface.co/bert-base-uncased/resolve/main/config.json',
'bert-large-uncased': 'https://huggingface.co/bert-large-uncased/resolve/main/config.json',
'bert-base-cased': 'https://huggingface.co/bert-base-cased/resolve/main/config.json',
'bert-large-cased': 'https://huggingface.co/bert-large-cased/resolve/main/config.json',
'bert-base-multilingual-uncased': 'https://huggingface.co/bert-base-multilingual-uncased/resolve/main/config.json',
'bert-base-multilingual-cased': 'https://huggingface.co/bert-base-multilingual-cased/resolve/main/config.json',
'bert-base-chinese': 'https://huggingface.co/bert-base-chinese/resolve/main/config.json',
'bert-base-german-cased': 'https://huggingface.co/bert-base-german-cased/resolve/main/config.json',
'bert-large-uncased-whole-word-masking': (
'https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/config.json'
),
'bert-large-cased-whole-word-masking': (
'https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/config.json'
),
'bert-large-uncased-whole-word-masking-finetuned-squad': (
'https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/config.json'
),
'bert-large-cased-whole-word-masking-finetuned-squad': (
'https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/config.json'
),
'bert-base-cased-finetuned-mrpc': 'https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/config.json',
'bert-base-german-dbmdz-cased': 'https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/config.json',
'bert-base-german-dbmdz-uncased': 'https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/config.json',
'cl-tohoku/bert-base-japanese': 'https://huggingface.co/cl-tohoku/bert-base-japanese/resolve/main/config.json',
'cl-tohoku/bert-base-japanese-whole-word-masking': (
'https://huggingface.co/cl-tohoku/bert-base-japanese-whole-word-masking/resolve/main/config.json'
),
'cl-tohoku/bert-base-japanese-char': (
'https://huggingface.co/cl-tohoku/bert-base-japanese-char/resolve/main/config.json'
),
'cl-tohoku/bert-base-japanese-char-whole-word-masking': (
'https://huggingface.co/cl-tohoku/bert-base-japanese-char-whole-word-masking/resolve/main/config.json'
),
'TurkuNLP/bert-base-finnish-cased-v1': (
'https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/config.json'
),
'TurkuNLP/bert-base-finnish-uncased-v1': (
'https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/config.json'
),
'wietsedv/bert-base-dutch-cased': 'https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/config.json',
# See all BERT models at https://huggingface.co/models?filter=bert
}
class BertConfig(PretrainedConfig):
    """Configuration class to store the configuration of a BERT model."""

    model_type = "bert"

    def __init__(self, vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=0, position_embedding_type="absolute", use_cache=True, classifier_dropout=None, **kwargs):
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class BertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ]
        )
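
# A minimal sketch of how the two classes above are used together: build a
# small config and inspect the dynamic ONNX input axes. The task="default"
# argument reflects OnnxConfig's constructor as I understand it and should be
# treated as an assumption.
config = BertConfig(hidden_size=384, num_hidden_layers=4, num_attention_heads=6)
onnx_config = BertOnnxConfig(config, task="default")
print(onnx_config.inputs)
# OrderedDict([('input_ids', {0: 'batch', 1: 'sequence'}), ('attention_mask', ...), ('token_type_ids', ...)])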
from __future__ import annotations


def print_distance(distance: list[float], src: int) -> None:
    print(f"Vertex\tShortest Distance from vertex {src}")
    for i, d in enumerate(distance):
        print(f"{i}\t\t{d}")


def check_negative_cycle(graph: list[dict[str, int]], distance: list[float], edge_count: int) -> bool:
    for j in range(edge_count):
        u, v, w = (graph[j][k] for k in ["src", "dst", "weight"])
        if distance[u] != float("inf") and distance[u] + w < distance[v]:
            return True
    return False


def bellman_ford(graph: list[dict[str, int]], vertex_count: int, edge_count: int, src: int) -> list[float]:
    """
    Returns the shortest paths from vertex src to all other vertices.
    >>> edges = [(2, 1, -10), (3, 2, 3), (0, 3, 5), (0, 1, 4)]
    >>> g = [{"src": s, "dst": d, "weight": w} for s, d, w in edges]
    >>> bellman_ford(g, 4, 4, 0)
    [0.0, -2.0, 8.0, 5.0]
    """
    distance = [float("inf")] * vertex_count
    distance[src] = 0.0
    # Relax every edge vertex_count - 1 times; afterwards all shortest
    # distances are final unless the graph has a negative-weight cycle.
    for _ in range(vertex_count - 1):
        for j in range(edge_count):
            u, v, w = (graph[j][k] for k in ["src", "dst", "weight"])
            if distance[u] != float("inf") and distance[u] + w < distance[v]:
                distance[v] = distance[u] + w
    negative_cycle_exists = check_negative_cycle(graph, distance, edge_count)
    if negative_cycle_exists:
        raise Exception("Negative cycle found")
    return distance


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    V = int(input("Enter number of vertices: ").strip())
    E = int(input("Enter number of edges: ").strip())
    graph: list[dict[str, int]] = [{} for _ in range(E)]
    for i in range(E):
        print("Edge ", i + 1)
        src, dest, weight = (
            int(x)
            for x in input("Enter source, destination, weight: ").strip().split(" ")
        )
        graph[i] = {"src": src, "dst": dest, "weight": weight}
    source = int(input("\nEnter shortest path source: ").strip())
    shortest_distance = bellman_ford(graph, V, E, source)
    print_distance(shortest_distance, source)
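
# A minimal sketch of the negative-cycle branch: pairing the edge 1 -> 2 of
# weight 3 with 2 -> 1 of weight -10 creates a cycle of total weight -7, so
# bellman_ford raises instead of returning distances.
edges = [(2, 1, -10), (1, 2, 3), (0, 1, 4)]
demo_graph = [{"src": s, "dst": d, "weight": w} for s, d, w in edges]
try:
    bellman_ford(demo_graph, vertex_count=3, edge_count=3, src=0)
except Exception as err:
    print(err)  # Negative cycle found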
from pathlib import Path

import fire


def minify(src_dir: str, dest_dir: str, n: int) -> None:
    """Write the first n lines of each file f in src_dir to dest_dir/f."""
    src_dir = Path(src_dir)
    dest_dir = Path(dest_dir)
    dest_dir.mkdir(exist_ok=True)
    for path in src_dir.iterdir():
        new = [x.rstrip() for x in list(path.open().readlines())][:n]
        dest_path = dest_dir.joinpath(path.name)
        print(dest_path)
        dest_path.open("w").write("\n".join(new))


if __name__ == "__main__":
    fire.Fire(minify)
import math


def perfect_square(num: int) -> bool:
    """
    Check if a number is a perfect square using the sqrt function.
    >>> perfect_square(9)
    True
    >>> perfect_square(10)
    False
    """
    return math.sqrt(num) * math.sqrt(num) == num


def perfect_square_binary_search(n: int) -> bool:
    """
    Check if a number is a perfect square using binary search.
    >>> perfect_square_binary_search(16)
    True
    >>> perfect_square_binary_search(10)
    False
    """
    left = 0
    right = n
    while left <= right:
        mid = (left + right) // 2
        if mid**2 == n:
            return True
        elif mid**2 > n:
            right = mid - 1
        else:
            left = mid + 1
    return False


if __name__ == "__main__":
    import doctest

    doctest.testmod()
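
# A quick sanity run of the binary-search variant: over a small range it
# should flag exactly the squares 0, 1, 4, ..., 196, using O(log n) integer
# comparisons instead of floating-point sqrt.
squares = [n for n in range(200) if perfect_square_binary_search(n)]
assert squares == [i * i for i in range(15)]
print(squares)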
"""simple docstring"""
import os
import posixpath
import uuid
from dataclasses import dataclass
from typing import TYPE_CHECKING, Iterable, List, Optional, Tuple, Union
import numpy as np
import pyarrow as pa
import datasets
from datasets.arrow_writer import ArrowWriter, ParquetWriter
from datasets.config import MAX_SHARD_SIZE
from datasets.filesystems import (
is_remote_filesystem,
rename,
)
from datasets.iterable_dataset import _BaseExamplesIterable
from datasets.utils.py_utils import convert_file_size_to_int
logger = datasets.utils.logging.get_logger(__name__)
if TYPE_CHECKING:
import pyspark
@dataclass
class SparkConfig(datasets.BuilderConfig):
    """BuilderConfig for Spark."""

    features: Optional[datasets.Features] = None


def _generate_iterable_examples(df: "pyspark.sql.DataFrame", partition_order: List[int]):
    import pyspark

    def generate_fn():
        df_with_partition_id = df.select("*", pyspark.sql.functions.spark_partition_id().alias("part_id"))
        for partition_id in partition_order:
            partition_df = df_with_partition_id.select("*").where(f"part_id = {partition_id}").drop("part_id")
            rows = partition_df.collect()
            row_id = 0
            for row in rows:
                yield f"{partition_id}_{row_id}", row.asDict()
                row_id += 1

    return generate_fn
class SparkExamplesIterable(_BaseExamplesIterable):
    def __init__(self, df: "pyspark.sql.DataFrame", partition_order=None):
        self.df = df
        self.partition_order = partition_order or range(self.df.rdd.getNumPartitions())
        self.generate_examples_fn = _generate_iterable_examples(self.df, self.partition_order)

    def __iter__(self):
        yield from self.generate_examples_fn()

    def shuffle_data_sources(self, generator: np.random.Generator) -> "SparkExamplesIterable":
        partition_order = list(range(self.df.rdd.getNumPartitions()))
        generator.shuffle(partition_order)
        return SparkExamplesIterable(self.df, partition_order=partition_order)

    def shard_data_sources(self, worker_id: int, num_workers: int) -> "SparkExamplesIterable":
        partition_order = self.split_shard_indices_by_worker(worker_id, num_workers)
        return SparkExamplesIterable(self.df, partition_order=partition_order)

    @property
    def n_shards(self) -> int:
        return len(self.partition_order)
class Spark(datasets.DatasetBuilder):
    BUILDER_CONFIG_CLASS = SparkConfig

    def __init__(self, df: "pyspark.sql.DataFrame", cache_dir: str = None, working_dir: str = None, **config_kwargs):
        import pyspark

        self._spark = pyspark.sql.SparkSession.builder.getOrCreate()
        self.df = df
        self._working_dir = working_dir
        super().__init__(cache_dir=cache_dir, config_name=str(self.df.semanticHash()), **config_kwargs)

    def _validate_cache_dir(self):
        def create_cache_and_write_probe(context):
            # makedirs with exist_ok will recursively create the directory. It will not throw an error if directories
            # already exist.
            os.makedirs(self._cache_dir, exist_ok=True)
            probe_file = os.path.join(self._cache_dir, "fs_test" + uuid.uuid4().hex)
            # Opening the file in append mode will create a new file unless it already exists, in which case it will not
            # change the file contents.
            open(probe_file, "a")
            return [probe_file]

        if self._spark.conf.get("spark.master", "").startswith("local"):
            return
        # If the cluster is multi-node, make sure that the user provided a cache_dir and that it is on an NFS
        # accessible to the driver.
        # TODO: Stream batches to the driver using ArrowCollectSerializer instead of throwing an error.
        if self._cache_dir:
            probe = (
                self._spark.sparkContext.parallelize(range(1), 1).mapPartitions(create_cache_and_write_probe).collect()
            )
            if os.path.isfile(probe[0]):
                return
        raise ValueError(
            "When using Dataset.from_spark on a multi-node cluster, the driver and all workers should be able to access cache_dir"
        )
    def _info(self):
        return datasets.DatasetInfo(features=self.config.features)

    def _split_generators(self, dl_manager: datasets.download.download_manager.DownloadManager):
        return [datasets.SplitGenerator(name=datasets.Split.TRAIN)]
    def _repartition_df_if_needed(self, max_shard_size):
        import pyspark

        def get_arrow_batch_size(it):
            for batch in it:
                yield pa.RecordBatch.from_pydict({"batch_bytes": [batch.nbytes]})

        df_num_rows = self.df.count()
        sample_num_rows = df_num_rows if df_num_rows <= 100 else 100
        # Approximate the size of each row (in Arrow format) by averaging over a max-100-row sample.
        approx_bytes_per_row = (
            self.df.limit(sample_num_rows)
            .repartition(1)
            .mapInArrow(get_arrow_batch_size, "batch_bytes: long")
            .agg(pyspark.sql.functions.sum("batch_bytes").alias("sample_bytes"))
            .collect()[0]
            .sample_bytes
            / sample_num_rows
        )
        approx_total_size = approx_bytes_per_row * df_num_rows
        if approx_total_size > max_shard_size:
            # Make sure there is at least one row per partition.
            new_num_partitions = min(df_num_rows, int(approx_total_size / max_shard_size))
            self.df = self.df.repartition(new_num_partitions)
    def _prepare_split_single(self, fpath: str, file_format: str, max_shard_size: int):
        import pyspark

        writer_class = ParquetWriter if file_format == "parquet" else ArrowWriter
        working_fpath = os.path.join(self._working_dir, os.path.basename(fpath)) if self._working_dir else fpath
        embed_local_files = file_format == "parquet"
        # Define these so that we don't reference self in write_arrow, which will result in a pickling error due to
        # pickling the SparkContext.
        features = self.config.features
        writer_batch_size = self._writer_batch_size
        storage_options = self._fs.storage_options

        def write_arrow(it):
            # Within the same SparkContext, no two task attempts will share the same attempt ID.
            task_id = pyspark.TaskContext().taskAttemptId()
            first_batch = next(it, None)
            if first_batch is None:
                # Some partitions might not receive any data.
                return pa.RecordBatch.from_arrays(
                    [[task_id], [0], [0]], names=["task_id", "num_examples", "num_bytes"]
                )
            shard_id = 0
            writer = writer_class(
                features=features, path=working_fpath.replace("SSSSS", f"{shard_id:05d}").replace("TTTTT", f"{task_id:05d}"), writer_batch_size=writer_batch_size, storage_options=storage_options, embed_local_files=embed_local_files,
            )
            table = pa.Table.from_batches([first_batch])
            writer.write_table(table)
            for batch in it:
                if max_shard_size is not None and writer._num_bytes >= max_shard_size:
                    num_examples, num_bytes = writer.finalize()
                    writer.close()
                    yield pa.RecordBatch.from_arrays(
                        [[task_id], [num_examples], [num_bytes]], names=["task_id", "num_examples", "num_bytes"]
                    )
                    shard_id += 1
                    writer = writer_class(
                        features=writer._features, path=working_fpath.replace("SSSSS", f"{shard_id:05d}").replace("TTTTT", f"{task_id:05d}"), writer_batch_size=writer_batch_size, storage_options=storage_options, embed_local_files=embed_local_files,
                    )
                table = pa.Table.from_batches([batch])
                writer.write_table(table)
            if writer._num_bytes > 0:
                num_examples, num_bytes = writer.finalize()
                writer.close()
                yield pa.RecordBatch.from_arrays(
                    [[task_id], [num_examples], [num_bytes]], names=["task_id", "num_examples", "num_bytes"]
                )
            if working_fpath != fpath:
                for file in os.listdir(os.path.dirname(working_fpath)):
                    dest = os.path.join(os.path.dirname(fpath), os.path.basename(file))
                    shutil.move(file, dest)

        stats = (
            self.df.mapInArrow(write_arrow, "task_id: long, num_examples: long, num_bytes: long")
            .groupBy("task_id")
            .agg(
                pyspark.sql.functions.sum("num_examples").alias("total_num_examples"), pyspark.sql.functions.sum("num_bytes").alias("total_num_bytes"), pyspark.sql.functions.count("num_bytes").alias("num_shards"), pyspark.sql.functions.collect_list("num_examples").alias("shard_lengths"),
            )
            .collect()
        )
        for row in stats:
            yield row.task_id, (row.total_num_examples, row.total_num_bytes, row.num_shards, row.shard_lengths)
    def _prepare_split(self, split_generator: "datasets.SplitGenerator", file_format: str = "arrow", max_shard_size: Optional[Union[str, int]] = None, num_proc: Optional[int] = None, **kwargs):
        self._validate_cache_dir()
        max_shard_size = convert_file_size_to_int(max_shard_size or MAX_SHARD_SIZE)
        self._repartition_df_if_needed(max_shard_size)
        is_local = not is_remote_filesystem(self._fs)
        path_join = os.path.join if is_local else posixpath.join
        SUFFIX = "-TTTTT-SSSSS-of-NNNNN"
        fname = f"{self.name}-{split_generator.name}{SUFFIX}.{file_format}"
        fpath = path_join(self._output_dir, fname)
        total_num_examples = 0
        total_num_bytes = 0
        total_shards = 0
        task_id_and_num_shards = []
        all_shard_lengths = []
        for task_id, content in self._prepare_split_single(fpath, file_format, max_shard_size):
            (
                num_examples,
                num_bytes,
                num_shards,
                shard_lengths,
            ) = content
            if num_bytes > 0:
                total_num_examples += num_examples
                total_num_bytes += num_bytes
                total_shards += num_shards
                task_id_and_num_shards.append((task_id, num_shards))
                all_shard_lengths.extend(shard_lengths)
        split_generator.split_info.num_examples = total_num_examples
        split_generator.split_info.num_bytes = total_num_bytes
        # should rename everything at the end
        logger.debug(f"Renaming {total_shards} shards.")
        if total_shards > 1:
            split_generator.split_info.shard_lengths = all_shard_lengths
            # Define fs outside of _rename_shard so that we don't reference self in the function, which will result in a
            # pickling error due to pickling the SparkContext.
            fs = self._fs

            # use the -SSSSS-of-NNNNN pattern
            def _rename_shard(task_id: int, shard_id: int, global_shard_id: int):
                rename(
                    fs, fpath.replace("SSSSS", f"{shard_id:05d}").replace("TTTTT", f"{task_id:05d}"), fpath.replace("TTTTT-SSSSS", f"{global_shard_id:05d}").replace("NNNNN", f"{total_shards:05d}"),
                )

            args = []
            global_shard_id = 0
            for i in range(len(task_id_and_num_shards)):
                task_id, num_shards = task_id_and_num_shards[i]
                for shard_id in range(num_shards):
                    args.append([task_id, shard_id, global_shard_id])
                    global_shard_id += 1
            self._spark.sparkContext.parallelize(args, len(args)).map(lambda args: _rename_shard(*args)).collect()
        else:
            # don't use any pattern
            shard_id = 0
            task_id = task_id_and_num_shards[0][0]
            self._rename(
                fpath.replace("SSSSS", f"{shard_id:05d}").replace("TTTTT", f"{task_id:05d}"), fpath.replace(SUFFIX, ""),
            )

    def _get_examples_iterable_for_split(self, split_generator: "datasets.SplitGenerator") -> SparkExamplesIterable:
        return SparkExamplesIterable(self.df)
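
# A minimal sketch of how this builder is reached in practice. It assumes a
# local pyspark session and that Dataset.from_spark (the public entry point in
# recent `datasets` releases) is available.
from pyspark.sql import SparkSession
from datasets import Dataset

spark = SparkSession.builder.master("local[2]").getOrCreate()
df = spark.createDataFrame([(i, i * i) for i in range(100)], ["n", "n_squared"])

ds = Dataset.from_spark(df)  # materializes the DataFrame through the Spark builder above
print(ds.num_rows)  # 100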
"""simple docstring"""
import argparse
import intel_extension_for_pytorch as ipex
import torch
from diffusers import DPMSolverMultistepScheduler, StableDiffusionPipeline
_snake_case = argparse.ArgumentParser('''Stable Diffusion script with intel optimization''', add_help=False)
parser.add_argument('''--dpm''', action='''store_true''', help='''Enable DPMSolver or not''')
parser.add_argument('''--steps''', default=None, type=int, help='''Num inference steps''')
_snake_case = parser.parse_args()
_snake_case = '''cpu'''
_snake_case = '''a lovely <dicoo> in red dress and hat, in the snowly and brightly night, with many brighly buildings'''
_snake_case = '''path-to-your-trained-model'''
_snake_case = StableDiffusionPipeline.from_pretrained(model_id)
if args.dpm:
_snake_case = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
_snake_case = pipe.to(device)
# to channels last
_snake_case = pipe.unet.to(memory_format=torch.channels_last)
_snake_case = pipe.vae.to(memory_format=torch.channels_last)
_snake_case = pipe.text_encoder.to(memory_format=torch.channels_last)
if pipe.requires_safety_checker:
_snake_case = pipe.safety_checker.to(memory_format=torch.channels_last)
# optimize with ipex
_snake_case = torch.randn(2, 4, 6_4, 6_4)
_snake_case = torch.rand(1) * 9_9_9
_snake_case = torch.randn(2, 7_7, 7_6_8)
_snake_case = (sample, timestep, encoder_hidden_status)
try:
_snake_case = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloataa, inplace=True, sample_input=input_example)
except Exception:
_snake_case = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloataa, inplace=True)
_snake_case = ipex.optimize(pipe.vae.eval(), dtype=torch.bfloataa, inplace=True)
_snake_case = ipex.optimize(pipe.text_encoder.eval(), dtype=torch.bfloataa, inplace=True)
if pipe.requires_safety_checker:
_snake_case = ipex.optimize(pipe.safety_checker.eval(), dtype=torch.bfloataa, inplace=True)
# compute
_snake_case = 6_6_6
_snake_case = torch.Generator(device).manual_seed(seed)
_snake_case = {'''generator''': generator}
if args.steps is not None:
_snake_case = args.steps
with torch.cpu.amp.autocast(enabled=True, dtype=torch.bfloataa):
_snake_case = pipe(prompt, **generate_kwargs).images[0]
# save image
image.save('''generated.png''')
"""simple docstring"""
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin, SchedulerOutput
@dataclass
class SdeVeOutput(BaseOutput):
    prev_sample: torch.FloatTensor
    prev_sample_mean: torch.FloatTensor


class ScoreSdeVeScheduler(SchedulerMixin, ConfigMixin):
    order = 1

    @register_to_config
    def __init__(self, num_train_timesteps: int = 2000, snr: float = 0.15, sigma_min: float = 0.01, sigma_max: float = 1348.0, sampling_eps: float = 1e-5, correct_steps: int = 1):
        # standard deviation of the initial noise distribution
        self.init_noise_sigma = sigma_max
        # setable values
        self.timesteps = None
        self.set_sigmas(num_train_timesteps, sigma_min, sigma_max, sampling_eps)

    def scale_model_input(self, sample: torch.FloatTensor, timestep: Optional[int] = None) -> torch.FloatTensor:
        return sample

    def set_timesteps(self, num_inference_steps: int, sampling_eps: float = None, device: Union[str, torch.device] = None):
        sampling_eps = sampling_eps if sampling_eps is not None else self.config.sampling_eps
        self.timesteps = torch.linspace(1, sampling_eps, num_inference_steps, device=device)

    def set_sigmas(self, num_inference_steps: int, sigma_min: float = None, sigma_max: float = None, sampling_eps: float = None):
        sigma_min = sigma_min if sigma_min is not None else self.config.sigma_min
        sigma_max = sigma_max if sigma_max is not None else self.config.sigma_max
        sampling_eps = sampling_eps if sampling_eps is not None else self.config.sampling_eps
        if self.timesteps is None:
            self.set_timesteps(num_inference_steps, sampling_eps)
        self.sigmas = sigma_min * (sigma_max / sigma_min) ** (self.timesteps / sampling_eps)
        self.discrete_sigmas = torch.exp(torch.linspace(math.log(sigma_min), math.log(sigma_max), num_inference_steps))
        self.sigmas = torch.tensor([sigma_min * (sigma_max / sigma_min) ** t for t in self.timesteps])

    def get_adjacent_sigma(self, timesteps, t):
        return torch.where(
            timesteps == 0, torch.zeros_like(t.to(timesteps.device)), self.discrete_sigmas[timesteps - 1].to(timesteps.device),
        )

    def step_pred(self, model_output: torch.FloatTensor, timestep: int, sample: torch.FloatTensor, generator: Optional[torch.Generator] = None, return_dict: bool = True) -> Union[SdeVeOutput, Tuple]:
        """Predictor step: propagate the sample one step through the reverse SDE."""
        if self.timesteps is None:
            raise ValueError(
                "`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler"
            )
        timestep = timestep * torch.ones(
            sample.shape[0], device=sample.device
        )  # torch.repeat_interleave(timestep, sample.shape[0])
        timesteps = (timestep * (len(self.timesteps) - 1)).long()
        # mps requires indices to be in the same device, so we use cpu as is the default with cuda
        timesteps = timesteps.to(self.discrete_sigmas.device)
        sigma = self.discrete_sigmas[timesteps].to(sample.device)
        adjacent_sigma = self.get_adjacent_sigma(timesteps, timestep).to(sample.device)
        drift = torch.zeros_like(sample)
        diffusion = (sigma**2 - adjacent_sigma**2) ** 0.5
        # equation 6 in the paper: the model_output modeled by the network is grad_x log pt(x)
        # also equation 47 shows the analog from SDE models to ancestral sampling methods
        diffusion = diffusion.flatten()
        while len(diffusion.shape) < len(sample.shape):
            diffusion = diffusion.unsqueeze(-1)
        drift = drift - diffusion**2 * model_output
        # equation 6: sample noise for the diffusion term of
        noise = randn_tensor(
            sample.shape, layout=sample.layout, generator=generator, device=sample.device, dtype=sample.dtype
        )
        prev_sample_mean = sample - drift  # subtract because `dt` is a small negative timestep
        # TODO is the variable diffusion the correct scaling term for the noise?
        prev_sample = prev_sample_mean + diffusion * noise  # add impact of diffusion field g
        if not return_dict:
            return (prev_sample, prev_sample_mean)
        return SdeVeOutput(prev_sample=prev_sample, prev_sample_mean=prev_sample_mean)

    def step_correct(self, model_output: torch.FloatTensor, sample: torch.FloatTensor, generator: Optional[torch.Generator] = None, return_dict: bool = True) -> Union[SchedulerOutput, Tuple]:
        """Corrector step: nudge the sample along the score direction after a predictor step."""
        if self.timesteps is None:
            raise ValueError(
                "`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler"
            )
        # For small batch sizes, the paper "suggest replacing norm(z) with sqrt(d), where d is the dim. of z"
        # sample noise for correction
        noise = randn_tensor(sample.shape, layout=sample.layout, generator=generator).to(sample.device)
        # compute step size from the model_output, the noise, and the snr
        grad_norm = torch.norm(model_output.reshape(model_output.shape[0], -1), dim=-1).mean()
        noise_norm = torch.norm(noise.reshape(noise.shape[0], -1), dim=-1).mean()
        step_size = (self.config.snr * noise_norm / grad_norm) ** 2 * 2
        step_size = step_size * torch.ones(sample.shape[0]).to(sample.device)
        # self.repeat_scalar(step_size, sample.shape[0])
        # compute corrected sample: model_output term and noise term
        step_size = step_size.flatten()
        while len(step_size.shape) < len(sample.shape):
            step_size = step_size.unsqueeze(-1)
        prev_sample_mean = sample + step_size * model_output
        prev_sample = prev_sample_mean + ((step_size * 2) ** 0.5) * noise
        if not return_dict:
            return (prev_sample,)
        return SchedulerOutput(prev_sample=prev_sample)

    def add_noise(self, original_samples: torch.FloatTensor, noise: torch.FloatTensor, timesteps: torch.FloatTensor) -> torch.FloatTensor:
        # Make sure sigmas and timesteps have the same device as original_samples
        timesteps = timesteps.to(original_samples.device)
        sigmas = self.discrete_sigmas.to(original_samples.device)[timesteps]
        noise = (
            noise * sigmas[:, None, None, None]
            if noise is not None
            else torch.randn_like(original_samples) * sigmas[:, None, None, None]
        )
        noisy_samples = noise + original_samples
        return noisy_samples

    def __len__(self):
        return self.config.num_train_timesteps
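
# A minimal sketch of the predictor-corrector sampling loop this scheduler
# supports, mirroring diffusers' ScoreSdeVePipeline. The lambda is a toy
# stand-in for a trained score model, so the output is meaningless but the
# loop runs end to end.
import torch

scheduler = ScoreSdeVeScheduler(num_train_timesteps=2000)
scheduler.set_timesteps(num_inference_steps=10)
scheduler.set_sigmas(num_inference_steps=10)

sample = torch.randn(1, 3, 32, 32) * scheduler.init_noise_sigma
score_model = lambda x, t: -x  # toy score network

for t in scheduler.timesteps:
    for _ in range(scheduler.config.correct_steps):
        model_output = score_model(sample, t)
        sample = scheduler.step_correct(model_output, sample).prev_sample
    model_output = score_model(sample, t)
    output = scheduler.step_pred(model_output, t, sample)
    sample, sample_mean = output.prev_sample, output.prev_sample_mean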
"""simple docstring"""
import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt"}

# See all BART models at https://huggingface.co/models?filter=bart
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""facebook/bart-base""": """https://huggingface.co/facebook/bart-base/resolve/main/vocab.json""",
"""facebook/bart-large""": """https://huggingface.co/facebook/bart-large/resolve/main/vocab.json""",
"""facebook/bart-large-mnli""": """https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json""",
"""facebook/bart-large-cnn""": """https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json""",
"""facebook/bart-large-xsum""": """https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json""",
"""yjernite/bart_eli5""": """https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json""",
},
"""merges_file""": {
"""facebook/bart-base""": """https://huggingface.co/facebook/bart-base/resolve/main/merges.txt""",
"""facebook/bart-large""": """https://huggingface.co/facebook/bart-large/resolve/main/merges.txt""",
"""facebook/bart-large-mnli""": """https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt""",
"""facebook/bart-large-cnn""": """https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt""",
"""facebook/bart-large-xsum""": """https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt""",
"""yjernite/bart_eli5""": """https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt""",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""facebook/bart-base""": 1024,
"""facebook/bart-large""": 1024,
"""facebook/bart-large-mnli""": 1024,
"""facebook/bart-large-cnn""": 1024,
"""facebook/bart-large-xsum""": 1024,
"""yjernite/bart_eli5""": 1024,
}
@lru_cache()
def bytes_to_unicode():
    """
    Returns a mapping from utf-8 bytes to unicode strings, avoiding the
    whitespace/control characters that byte-level BPE cannot handle.
    """
    bs = (
        list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs, cs))


def get_pairs(word):
    """Return the set of adjacent symbol pairs in a word (a tuple of variable-length strings)."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
class BartTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(self, vocab_file, merges_file, errors="replace", bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", add_prefix_space=False, **kwargs):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        super().__init__(
            errors=errors, bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, cls_token=cls_token, pad_token=pad_token, mask_token=mask_token, add_prefix_space=add_prefix_space, **kwargs,
        )
        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors  # how to handle errors in decoding
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            bpe_merges = merges_handle.read().split("\n")[1:-1]
        bpe_merges = [tuple(merge.split()) for merge in bpe_merges]
        self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
        self.cache = {}
        self.add_prefix_space = add_prefix_space
        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        self.pat = re.compile(r"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+")

    @property
    def vocab_size(self):
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)
    def bpe(self, token):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        pairs = get_pairs(word)
        if not pairs:
            return token
        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j
                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = " ".join(word)
        self.cache[token] = word
        return word
    def _tokenize(self, text):
        bpe_tokens = []
        for token in re.findall(self.pat, text):
            token = "".join(
                self.byte_encoder[b] for b in token.encode("utf-8")
            )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(" "))
        return bpe_tokens

    def _convert_token_to_id(self, token):
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.decoder.get(index)

    def convert_tokens_to_string(self, tokens):
        text = "".join(tokens)
        text = bytearray([self.byte_decoder[c] for c in text]).decode("utf-8", errors=self.errors)
        return text
    def save_vocabulary(self, save_directory, filename_prefix=None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )
        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")
        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1
        return vocab_file, merge_file
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
        add_prefix_space = kwargs.pop("add_prefix_space", self.add_prefix_space)
        if (is_split_into_words or add_prefix_space) and (len(text) > 0 and not text[0].isspace()):
            text = " " + text
        return (text, kwargs)
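
# A small demonstration of the two byte-level helpers above: every possible
# byte gets a printable unicode stand-in, and get_pairs enumerates the
# adjacent symbol pairs that the BPE merge ranks are defined over.
mapping = bytes_to_unicode()
print(len(mapping))  # 256 entries, one printable character per byte value
print("".join(mapping[b] for b in "hug".encode("utf-8")))  # printable ASCII maps to itself: 'hug'
print(sorted(get_pairs(tuple("hug"))))  # [('h', 'u'), ('u', 'g')]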
"""simple docstring"""
import copy
import random
from transformers import CLIPTokenizer
class _A ( __a ):
def __init__( self , *__lowerCAmelCase , **__lowerCAmelCase ):
"""simple docstring"""
super().__init__(*a_ , **a_ )
lowercase = {}
def A__ ( self , __lowerCAmelCase , *__lowerCAmelCase , **__lowerCAmelCase ):
"""simple docstring"""
lowercase = super().add_tokens(a_ , *a_ , **a_ )
if num_added_tokens == 0:
raise ValueError(
f'The tokenizer already contains the token {placeholder_token}. Please pass a different'
""" `placeholder_token` that is not already in the tokenizer.""" )
def A__ ( self , __lowerCAmelCase , *__lowerCAmelCase , __lowerCAmelCase=1 , **__lowerCAmelCase ):
"""simple docstring"""
lowercase = []
if num_vec_per_token == 1:
self.try_adding_tokens(a_ , *a_ , **a_ )
output.append(a_ )
else:
lowercase = []
for i in range(a_ ):
lowercase = placeholder_token + f'_{i}'
self.try_adding_tokens(a_ , *a_ , **a_ )
output.append(a_ )
# handle cases where there is a new placeholder token that contains the current placeholder token but is larger
for token in self.token_map:
if token in placeholder_token:
raise ValueError(
f'The tokenizer already has placeholder token {token} that can get confused with'
f' {placeholder_token}keep placeholder tokens independent' )
lowercase = output
def A__ ( self , __lowerCAmelCase , __lowerCAmelCase=False , __lowerCAmelCase=1.0 ):
"""simple docstring"""
if isinstance(a_ , a_ ):
lowercase = []
for i in range(len(a_ ) ):
output.append(self.replace_placeholder_tokens_in_text(text[i] , vector_shuffle=a_ ) )
return output
for placeholder_token in self.token_map:
if placeholder_token in text:
lowercase = self.token_map[placeholder_token]
lowercase = tokens[: 1 + int(len(a_ ) * prop_tokens_to_load )]
if vector_shuffle:
lowercase = copy.copy(a_ )
random.shuffle(a_ )
lowercase = text.replace(a_ , """ """.join(a_ ) )
return text
def __call__( self , __lowerCAmelCase , *__lowerCAmelCase , __lowerCAmelCase=False , __lowerCAmelCase=1.0 , **__lowerCAmelCase ):
"""simple docstring"""
return super().__call__(
self.replace_placeholder_tokens_in_text(
a_ , vector_shuffle=a_ , prop_tokens_to_load=a_ ) , *a_ , **a_ , )
def A__ ( self , __lowerCAmelCase , *__lowerCAmelCase , __lowerCAmelCase=False , __lowerCAmelCase=1.0 , **__lowerCAmelCase ):
"""simple docstring"""
return super().encode(
self.replace_placeholder_tokens_in_text(
a_ , vector_shuffle=a_ , prop_tokens_to_load=a_ ) , *a_ , **a_ , )
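
# A minimal sketch of the multi-vector workflow (the checkpoint name is just
# an example; any CLIP tokenizer checkpoint works):
tokenizer = MultiTokenCLIPTokenizer.from_pretrained("openai/clip-vit-base-patch32")
tokenizer.add_placeholder_tokens("<cat-toy>", num_vec_per_token=4)
print(tokenizer.token_map["<cat-toy>"])  # ['<cat-toy>_0', ..., '<cat-toy>_3']

# The placeholder expands to its sub-tokens before normal CLIP tokenization.
ids = tokenizer("a photo of <cat-toy>", vector_shuffle=True)["input_ids"]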
"""simple docstring"""
import contextlib
import os
import sqlitea
import pytest
from datasets import Dataset, Features, Value
from datasets.io.sql import SqlDatasetReader, SqlDatasetWriter
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases, require_sqlalchemy
def UpperCAmelCase__ ( lowerCAmelCase__ :int , lowerCAmelCase__ :Tuple ) -> Union[str, Any]:
'''simple docstring'''
assert isinstance(lowerCAmelCase__ , lowerCAmelCase__ )
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@require_sqlalchemy
@pytest.mark.parametrize("""keep_in_memory""" , [False, True] )
def UpperCAmelCase__ ( lowerCAmelCase__ :Dict , lowerCAmelCase__ :Optional[Any] , lowerCAmelCase__ :str , lowerCAmelCase__ :List[str] ) -> int:
'''simple docstring'''
lowercase = tmp_path / """cache"""
lowercase = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
lowercase = SqlDatasetReader(
"""dataset""" , """sqlite:///""" + sqlite_path , cache_dir=lowerCAmelCase__ , keep_in_memory=lowerCAmelCase__ ).read()
_check_sql_dataset(lowerCAmelCase__ , lowerCAmelCase__ )
@require_sqlalchemy
@pytest.mark.parametrize(
"""features""" , [
None,
{"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""},
{"""col_1""": """string""", """col_2""": """string""", """col_3""": """string"""},
{"""col_1""": """int32""", """col_2""": """int32""", """col_3""": """int32"""},
{"""col_1""": """float32""", """col_2""": """float32""", """col_3""": """float32"""},
] , )
def UpperCAmelCase__ ( lowerCAmelCase__ :int , lowerCAmelCase__ :Union[str, Any] , lowerCAmelCase__ :List[Any] , lowerCAmelCase__ :Union[str, Any] ) -> Optional[int]:
'''simple docstring'''
lowercase = tmp_path / """cache"""
lowercase = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
lowercase = features.copy() if features else default_expected_features
lowercase = (
Features({feature: Value(lowerCAmelCase__ ) for feature, dtype in features.items()} ) if features is not None else None
)
lowercase = SqlDatasetReader("""dataset""" , """sqlite:///""" + sqlite_path , features=lowerCAmelCase__ , cache_dir=lowerCAmelCase__ ).read()
_check_sql_dataset(lowerCAmelCase__ , lowerCAmelCase__ )
def UpperCAmelCase__ ( lowerCAmelCase__ :Any ) -> Optional[int]:
'''simple docstring'''
with contextlib.closing(sqlitea.connect(lowerCAmelCase__ ) ) as con:
lowercase = con.cursor()
cur.execute("""SELECT * FROM dataset""" )
for row in cur:
yield row
@require_sqlalchemy
def UpperCAmelCase__ ( lowerCAmelCase__ :Tuple , lowerCAmelCase__ :str , lowerCAmelCase__ :int ) -> List[Any]:
'''simple docstring'''
lowercase = tmp_path / """cache"""
lowercase = os.path.join(lowerCAmelCase__ , """tmp.sql""" )
lowercase = SqlDatasetReader("""dataset""" , """sqlite:///""" + sqlite_path , cache_dir=lowerCAmelCase__ ).read()
SqlDatasetWriter(lowerCAmelCase__ , """dataset""" , """sqlite:///""" + output_sqlite_path , num_proc=1 ).write()
lowercase = iter_sql_file(lowerCAmelCase__ )
lowercase = iter_sql_file(lowerCAmelCase__ )
for rowa, rowa in zip(lowerCAmelCase__ , lowerCAmelCase__ ):
assert rowa == rowa
@require_sqlalchemy
def UpperCAmelCase__ ( lowerCAmelCase__ :str , lowerCAmelCase__ :Optional[Any] , lowerCAmelCase__ :Optional[int] ) -> Optional[int]:
'''simple docstring'''
lowercase = tmp_path / """cache"""
lowercase = os.path.join(lowerCAmelCase__ , """tmp.sql""" )
lowercase = SqlDatasetReader("""dataset""" , """sqlite:///""" + sqlite_path , cache_dir=lowerCAmelCase__ ).read()
SqlDatasetWriter(lowerCAmelCase__ , """dataset""" , """sqlite:///""" + output_sqlite_path , num_proc=2 ).write()
lowercase = iter_sql_file(lowerCAmelCase__ )
lowercase = iter_sql_file(lowerCAmelCase__ )
for rowa, rowa in zip(lowerCAmelCase__ , lowerCAmelCase__ ):
assert rowa == rowa
@require_sqlalchemy
def UpperCAmelCase__ ( lowerCAmelCase__ :Dict , lowerCAmelCase__ :Dict , lowerCAmelCase__ :Tuple ) -> Any:
'''simple docstring'''
lowercase = tmp_path / """cache"""
lowercase = os.path.join(lowerCAmelCase__ , """tmp.sql""" )
lowercase = SqlDatasetReader("""dataset""" , """sqlite:///""" + sqlite_path , cache_dir=lowerCAmelCase__ ).read()
with pytest.raises(lowerCAmelCase__ ):
SqlDatasetWriter(lowerCAmelCase__ , """dataset""" , """sqlite:///""" + output_sqlite_path , num_proc=0 ).write()
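
# A minimal sketch of the reader/writer round trip these tests exercise,
# outside pytest (the /tmp path is illustrative; sqlalchemy must be installed):
from datasets import Dataset
from datasets.io.sql import SqlDatasetReader, SqlDatasetWriter

ds = Dataset.from_dict({"col_1": ["a", "b"], "col_2": [1, 2], "col_3": [1.0, 2.0]})
SqlDatasetWriter(ds, "dataset", "sqlite:////tmp/roundtrip.db", num_proc=1).write()

reloaded = SqlDatasetReader("dataset", "sqlite:////tmp/roundtrip.db").read()
print(reloaded.column_names)  # ['col_1', 'col_2', 'col_3']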
def decimal_isolate(number: float, digit_amount: int) -> float:
    """Isolate the decimal part of number, keeping at most digit_amount digits."""
    if digit_amount > 0:
        return round(number - int(number), digit_amount)
    return number - int(number)
if __name__ == "__main__":
print(decimal_isolate(1.53, 0))
print(decimal_isolate(35.345, 1))
print(decimal_isolate(35.345, 2))
print(decimal_isolate(35.345, 3))
print(decimal_isolate(-14.789, 3))
print(decimal_isolate(0, 2))
print(decimal_isolate(-14.123, 1))
print(decimal_isolate(-14.123, 2))
print(decimal_isolate(-14.123, 3))
from typing import Optional, Tuple, Union
import flax
import flax.linen as nn
import jax
import jax.numpy as jnp
from flax.core.frozen_dict import FrozenDict
from ..configuration_utils import ConfigMixin, flax_register_to_config
from ..utils import BaseOutput
from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps
from .modeling_flax_utils import FlaxModelMixin
from .unet_2d_blocks_flax import (
    FlaxCrossAttnDownBlock2D,
    FlaxDownBlock2D,
    FlaxUNetMidBlock2DCrossAttn,
)
@flax.struct.dataclass
class FlaxControlNetOutput(BaseOutput):
    down_block_res_samples: jnp.ndarray
    mid_block_res_sample: jnp.ndarray
class FlaxControlNetConditioningEmbedding(nn.Module):
    conditioning_embedding_channels: int
    block_out_channels: Tuple[int] = (16, 32, 96, 256)
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        self.conv_in = nn.Conv(self.block_out_channels[0], kernel_size=(3, 3), padding=((1, 1), (1, 1)), dtype=self.dtype)
        blocks = []
        for i in range(len(self.block_out_channels) - 1):
            channel_in = self.block_out_channels[i]
            channel_out = self.block_out_channels[i + 1]
            conv1 = nn.Conv(channel_in, kernel_size=(3, 3), padding=((1, 1), (1, 1)), dtype=self.dtype)
            blocks.append(conv1)
            conv2 = nn.Conv(channel_out, kernel_size=(3, 3), strides=(2, 2), padding=((1, 1), (1, 1)), dtype=self.dtype)
            blocks.append(conv2)
        self.blocks = blocks
        self.conv_out = nn.Conv(self.conditioning_embedding_channels, kernel_size=(3, 3), padding=((1, 1), (1, 1)), kernel_init=nn.initializers.zeros_init(), bias_init=nn.initializers.zeros_init(), dtype=self.dtype)

    def __call__(self, conditioning):
        embedding = self.conv_in(conditioning)
        embedding = nn.silu(embedding)
        for block in self.blocks:
            embedding = block(embedding)
            embedding = nn.silu(embedding)
        embedding = self.conv_out(embedding)
        return embedding
@flax_register_to_config
class FlaxControlNetModel(nn.Module, FlaxModelMixin, ConfigMixin):
    sample_size: int = 32
    in_channels: int = 4
    down_block_types: Tuple[str] = (
        "CrossAttnDownBlock2D",
        "CrossAttnDownBlock2D",
        "CrossAttnDownBlock2D",
        "DownBlock2D",
    )
    only_cross_attention: Union[bool, Tuple[bool]] = False
    block_out_channels: Tuple[int] = (320, 640, 1280, 1280)
    layers_per_block: int = 2
    attention_head_dim: Union[int, Tuple[int]] = 8
    num_attention_heads: Optional[Union[int, Tuple[int]]] = None
    cross_attention_dim: int = 1280
    dropout: float = 0.0
    use_linear_projection: bool = False
    dtype: jnp.dtype = jnp.float32
    flip_sin_to_cos: bool = True
    freq_shift: int = 0
    controlnet_conditioning_channel_order: str = "rgb"
    conditioning_embedding_out_channels: Tuple[int] = (16, 32, 96, 256)

    def init_weights(self, rng: jax.random.KeyArray) -> FrozenDict:
        # init input tensors
        sample_shape = (1, self.in_channels, self.sample_size, self.sample_size)
        sample = jnp.zeros(sample_shape, dtype=jnp.float32)
        timesteps = jnp.ones((1,), dtype=jnp.int32)
        encoder_hidden_states = jnp.zeros((1, 1, self.cross_attention_dim), dtype=jnp.float32)
        controlnet_cond_shape = (1, 3, self.sample_size * 8, self.sample_size * 8)
        controlnet_cond = jnp.zeros(controlnet_cond_shape, dtype=jnp.float32)
        params_rng, dropout_rng = jax.random.split(rng)
        rngs = {"params": params_rng, "dropout": dropout_rng}
        return self.init(rngs, sample, timesteps, encoder_hidden_states, controlnet_cond)["params"]
    def setup(self):
        block_out_channels = self.block_out_channels
        time_embed_dim = block_out_channels[0] * 4
        # If `num_attention_heads` is not defined (which is the case for most models)
        # it will default to `attention_head_dim`. This looks weird upon first reading it and it is.
        # The reason for this behavior is to correct for incorrectly named variables that were introduced
        # when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131
        # Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking
        # which is why we correct for the naming here.
        num_attention_heads = self.num_attention_heads or self.attention_head_dim

        # input
        self.conv_in = nn.Conv(block_out_channels[0], kernel_size=(3, 3), strides=(1, 1), padding=((1, 1), (1, 1)), dtype=self.dtype)

        # time
        self.time_proj = FlaxTimesteps(block_out_channels[0], flip_sin_to_cos=self.flip_sin_to_cos, freq_shift=self.config.freq_shift)
        self.time_embedding = FlaxTimestepEmbedding(time_embed_dim, dtype=self.dtype)

        self.controlnet_cond_embedding = FlaxControlNetConditioningEmbedding(conditioning_embedding_channels=block_out_channels[0], block_out_channels=self.conditioning_embedding_out_channels)

        only_cross_attention = self.only_cross_attention
        if isinstance(only_cross_attention, bool):
            only_cross_attention = (only_cross_attention,) * len(self.down_block_types)
        if isinstance(num_attention_heads, int):
            num_attention_heads = (num_attention_heads,) * len(self.down_block_types)

        # down
        down_blocks = []
        controlnet_down_blocks = []
        output_channel = block_out_channels[0]
        controlnet_block = nn.Conv(output_channel, kernel_size=(1, 1), padding="VALID", kernel_init=nn.initializers.zeros_init(), bias_init=nn.initializers.zeros_init(), dtype=self.dtype)
        controlnet_down_blocks.append(controlnet_block)
        for i, down_block_type in enumerate(self.down_block_types):
            input_channel = output_channel
            output_channel = block_out_channels[i]
            is_final_block = i == len(block_out_channels) - 1
            if down_block_type == "CrossAttnDownBlock2D":
                down_block = FlaxCrossAttnDownBlock2D(in_channels=input_channel, out_channels=output_channel, dropout=self.dropout, num_layers=self.layers_per_block, num_attention_heads=num_attention_heads[i], add_downsample=not is_final_block, use_linear_projection=self.use_linear_projection, only_cross_attention=only_cross_attention[i], dtype=self.dtype)
            else:
                down_block = FlaxDownBlock2D(in_channels=input_channel, out_channels=output_channel, dropout=self.dropout, num_layers=self.layers_per_block, add_downsample=not is_final_block, dtype=self.dtype)
            down_blocks.append(down_block)
            for _ in range(self.layers_per_block):
                controlnet_block = nn.Conv(output_channel, kernel_size=(1, 1), padding="VALID", kernel_init=nn.initializers.zeros_init(), bias_init=nn.initializers.zeros_init(), dtype=self.dtype)
                controlnet_down_blocks.append(controlnet_block)
            if not is_final_block:
                controlnet_block = nn.Conv(output_channel, kernel_size=(1, 1), padding="VALID", kernel_init=nn.initializers.zeros_init(), bias_init=nn.initializers.zeros_init(), dtype=self.dtype)
                controlnet_down_blocks.append(controlnet_block)
        self.down_blocks = down_blocks
        self.controlnet_down_blocks = controlnet_down_blocks

        # mid
        mid_block_channel = block_out_channels[-1]
        self.mid_block = FlaxUNetMidBlock2DCrossAttn(in_channels=mid_block_channel, dropout=self.dropout, num_attention_heads=num_attention_heads[-1], use_linear_projection=self.use_linear_projection, dtype=self.dtype)
        self.controlnet_mid_block = nn.Conv(mid_block_channel, kernel_size=(1, 1), padding="VALID", kernel_init=nn.initializers.zeros_init(), bias_init=nn.initializers.zeros_init(), dtype=self.dtype)
    def __call__(self, sample, timesteps, encoder_hidden_states, controlnet_cond, conditioning_scale=1.0, return_dict=True, train=False):
        channel_order = self.controlnet_conditioning_channel_order
        if channel_order == "bgr":
            controlnet_cond = jnp.flip(controlnet_cond, axis=1)

        # 1. time
        if not isinstance(timesteps, jnp.ndarray):
            timesteps = jnp.array([timesteps], dtype=jnp.int32)
        elif isinstance(timesteps, jnp.ndarray) and len(timesteps.shape) == 0:
            timesteps = timesteps.astype(dtype=jnp.float32)
            timesteps = jnp.expand_dims(timesteps, 0)

        t_emb = self.time_proj(timesteps)
        t_emb = self.time_embedding(t_emb)

        # 2. pre-process
        sample = jnp.transpose(sample, (0, 2, 3, 1))
        sample = self.conv_in(sample)

        controlnet_cond = jnp.transpose(controlnet_cond, (0, 2, 3, 1))
        controlnet_cond = self.controlnet_cond_embedding(controlnet_cond)
        sample += controlnet_cond

        # 3. down
        down_block_res_samples = (sample,)
        for down_block in self.down_blocks:
            if isinstance(down_block, FlaxCrossAttnDownBlock2D):
                sample, res_samples = down_block(sample, t_emb, encoder_hidden_states, deterministic=not train)
            else:
                sample, res_samples = down_block(sample, t_emb, deterministic=not train)
            down_block_res_samples += res_samples

        # 4. mid
        sample = self.mid_block(sample, t_emb, encoder_hidden_states, deterministic=not train)

        # 5. controlnet blocks
        controlnet_down_block_res_samples = ()
        for down_block_res_sample, controlnet_block in zip(down_block_res_samples, self.controlnet_down_blocks):
            down_block_res_sample = controlnet_block(down_block_res_sample)
            controlnet_down_block_res_samples += (down_block_res_sample,)

        down_block_res_samples = controlnet_down_block_res_samples

        mid_block_res_sample = self.controlnet_mid_block(sample)

        # 6. scaling
        down_block_res_samples = [sample * conditioning_scale for sample in down_block_res_samples]
        mid_block_res_sample *= conditioning_scale

        if not return_dict:
            return (down_block_res_samples, mid_block_res_sample)

        return FlaxControlNetOutput(down_block_res_samples=down_block_res_samples, mid_block_res_sample=mid_block_res_sample)
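
# Usage sketch (added for illustration; not part of the original file). The tiny
# config values are assumptions chosen to keep the example fast; any sizes work as
# long as `block_out_channels` matches `down_block_types` in length.
if __name__ == "__main__":
    import jax

    controlnet = FlaxControlNetModel(
        sample_size=32,
        down_block_types=("CrossAttnDownBlock2D", "DownBlock2D"),
        block_out_channels=(32, 64),
        layers_per_block=1,
        attention_head_dim=8,
        cross_attention_dim=32,
    )
    params = controlnet.init_weights(jax.random.PRNGKey(0))
    sample = jnp.zeros((1, 4, 32, 32))  # noisy latents, NCHW
    timesteps = jnp.ones((1,), dtype=jnp.int32)
    encoder_hidden_states = jnp.zeros((1, 77, 32))  # dummy text embeddings
    controlnet_cond = jnp.zeros((1, 3, 256, 256))  # conditioning image, 8x the latent size
    down_res, mid_res = controlnet.apply(
        {"params": params}, sample, timesteps, encoder_hidden_states, controlnet_cond, return_dict=False
    )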
def lucas_lehmer_test(p: int) -> bool:
    """Return True if the Mersenne number 2**p - 1 is prime (Lucas-Lehmer primality test)."""
    if p < 2:
        raise ValueError("p should not be less than 2!")
    elif p == 2:
        return True

    s = 4
    m = (1 << p) - 1
    for _ in range(p - 2):
        s = ((s * s) - 2) % m
    return s == 0


if __name__ == "__main__":
    print(lucas_lehmer_test(7))
    print(lucas_lehmer_test(11))
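    # Extra sanity checks (added for illustration; not in the original file):
    # exponents 3, 5, 7, 13 yield the Mersenne primes 7, 31, 127, 8191,
    # while 2**11 - 1 = 2047 = 23 * 89 is composite.
    assert all(lucas_lehmer_test(p) for p in (3, 5, 7, 13))
    assert not lucas_lehmer_test(11)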
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_sentencepiece_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)


_import_structure = {
    "configuration_rembert": ["REMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "RemBertConfig", "RemBertOnnxConfig"]
}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_rembert"] = ["RemBertTokenizer"]

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_rembert_fast"] = ["RemBertTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_rembert"] = [
        "REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "RemBertForCausalLM",
        "RemBertForMaskedLM",
        "RemBertForMultipleChoice",
        "RemBertForQuestionAnswering",
        "RemBertForSequenceClassification",
        "RemBertForTokenClassification",
        "RemBertLayer",
        "RemBertModel",
        "RemBertPreTrainedModel",
        "load_tf_weights_in_rembert",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_rembert"] = [
        "TF_REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFRemBertForCausalLM",
        "TFRemBertForMaskedLM",
        "TFRemBertForMultipleChoice",
        "TFRemBertForQuestionAnswering",
        "TFRemBertForSequenceClassification",
        "TFRemBertForTokenClassification",
        "TFRemBertLayer",
        "TFRemBertModel",
        "TFRemBertPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_rembert import REMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RemBertConfig, RemBertOnnxConfig

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_rembert import RemBertTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_rembert_fast import RemBertTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_rembert import (
            REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            RemBertForCausalLM,
            RemBertForMaskedLM,
            RemBertForMultipleChoice,
            RemBertForQuestionAnswering,
            RemBertForSequenceClassification,
            RemBertForTokenClassification,
            RemBertLayer,
            RemBertModel,
            RemBertPreTrainedModel,
            load_tf_weights_in_rembert,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_rembert import (
            TF_REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFRemBertForCausalLM,
            TFRemBertForMaskedLM,
            TFRemBertForMultipleChoice,
            TFRemBertForQuestionAnswering,
            TFRemBertForSequenceClassification,
            TFRemBertForTokenClassification,
            TFRemBertLayer,
            TFRemBertModel,
            TFRemBertPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
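
# Usage sketch (added for illustration; not part of the original file). With the
# `_LazyModule` pattern above, importing the package is cheap: heavy submodules are
# only imported on first attribute access.
if __name__ == "__main__":
    import importlib

    rembert = importlib.import_module("transformers.models.rembert")
    config_cls = rembert.RemBertConfig  # resolved lazily via _LazyModule.__getattr__
    print(config_cls.__name__)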
"""simple docstring"""
from ...processing_utils import ProcessorMixin
class __lowercase ( _UpperCamelCase ):
'''simple docstring'''
__lowerCAmelCase = '''SpeechT5FeatureExtractor'''
__lowerCAmelCase = '''SpeechT5Tokenizer'''
def __init__( self , _UpperCAmelCase , _UpperCAmelCase ):
super().__init__(_UpperCAmelCase , _UpperCAmelCase )
def __call__( self , *_UpperCAmelCase , **_UpperCAmelCase ):
__a : Tuple = kwargs.pop('''audio''' , _UpperCAmelCase )
__a : List[Any] = kwargs.pop('''text''' , _UpperCAmelCase )
__a : List[Any] = kwargs.pop('''text_target''' , _UpperCAmelCase )
__a : Tuple = kwargs.pop('''audio_target''' , _UpperCAmelCase )
__a : List[str] = kwargs.pop('''sampling_rate''' , _UpperCAmelCase )
if audio is not None and text is not None:
raise ValueError(
'''Cannot process both `audio` and `text` inputs. Did you mean `audio_target` or `text_target`?''' )
if audio_target is not None and text_target is not None:
raise ValueError(
'''Cannot process both `audio_target` and `text_target` inputs. Did you mean `audio` or `text`?''' )
if audio is None and audio_target is None and text is None and text_target is None:
raise ValueError(
'''You need to specify either an `audio`, `audio_target`, `text`, or `text_target` input to process.''' )
if audio is not None:
__a : Dict = self.feature_extractor(_UpperCAmelCase , *_UpperCAmelCase , sampling_rate=_UpperCAmelCase , **_UpperCAmelCase )
elif text is not None:
__a : Union[str, Any] = self.tokenizer(_UpperCAmelCase , **_UpperCAmelCase )
else:
__a : str = None
if audio_target is not None:
__a : Optional[int] = self.feature_extractor(audio_target=_UpperCAmelCase , *_UpperCAmelCase , sampling_rate=_UpperCAmelCase , **_UpperCAmelCase )
__a : Any = targets['''input_values''']
elif text_target is not None:
__a : int = self.tokenizer(_UpperCAmelCase , **_UpperCAmelCase )
__a : Optional[Any] = targets['''input_ids''']
else:
__a : List[Any] = None
if inputs is None:
return targets
if targets is not None:
__a : Dict = labels
__a : List[Any] = targets.get('''attention_mask''' )
if decoder_attention_mask is not None:
__a : List[str] = decoder_attention_mask
return inputs
def _lowerCamelCase ( self , *_UpperCAmelCase , **_UpperCAmelCase ):
__a : List[str] = kwargs.pop('''input_values''' , _UpperCAmelCase )
__a : Optional[Any] = kwargs.pop('''input_ids''' , _UpperCAmelCase )
__a : Dict = kwargs.pop('''labels''' , _UpperCAmelCase )
if input_values is not None and input_ids is not None:
raise ValueError('''Cannot process both `input_values` and `input_ids` inputs.''' )
if input_values is None and input_ids is None and labels is None:
raise ValueError(
'''You need to specify either an `input_values`, `input_ids`, or `labels` input to be padded.''' )
if input_values is not None:
__a : str = self.feature_extractor.pad(_UpperCAmelCase , *_UpperCAmelCase , **_UpperCAmelCase )
elif input_ids is not None:
__a : str = self.tokenizer.pad(_UpperCAmelCase , **_UpperCAmelCase )
else:
__a : List[Any] = None
if labels is not None:
if "input_ids" in labels or (isinstance(_UpperCAmelCase , _UpperCAmelCase ) and "input_ids" in labels[0]):
__a : str = self.tokenizer.pad(_UpperCAmelCase , **_UpperCAmelCase )
__a : str = targets['''input_ids''']
else:
__a : Optional[Any] = self.feature_extractor.feature_size
__a : Tuple = self.feature_extractor.num_mel_bins
__a : str = self.feature_extractor.pad(_UpperCAmelCase , *_UpperCAmelCase , **_UpperCAmelCase )
__a : List[str] = feature_size_hack
__a : int = targets['''input_values''']
else:
__a : Tuple = None
if inputs is None:
return targets
if targets is not None:
__a : List[Any] = labels
__a : Optional[int] = targets.get('''attention_mask''' )
if decoder_attention_mask is not None:
__a : Any = decoder_attention_mask
return inputs
def _lowerCamelCase ( self , *_UpperCAmelCase , **_UpperCAmelCase ):
return self.tokenizer.batch_decode(*_UpperCAmelCase , **_UpperCAmelCase )
def _lowerCamelCase ( self , *_UpperCAmelCase , **_UpperCAmelCase ):
return self.tokenizer.decode(*_UpperCAmelCase , **_UpperCAmelCase )
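
# Usage sketch (added for illustration; not part of the original file). Assumes the
# `microsoft/speecht5_tts` checkpoint is reachable on the Hub:
if __name__ == "__main__":
    processor = SpeechT5Processor.from_pretrained("microsoft/speecht5_tts")
    inputs = processor(text="Hello world", return_tensors="pt")  # routed to the tokenizer
    print(inputs["input_ids"].shape)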
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING

# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available


_import_structure = {
    "configuration_vivit": ["VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "VivitConfig"],
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["image_processing_vivit"] = ["VivitImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_vivit"] = [
        "VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "VivitModel",
        "VivitPreTrainedModel",
        "VivitForVideoClassification",
    ]


if TYPE_CHECKING:
    from .configuration_vivit import VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, VivitConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .image_processing_vivit import VivitImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_vivit import (
            VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            VivitForVideoClassification,
            VivitModel,
            VivitPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
def solution(length: int = 50) -> int:
    """
    Count the ways a row of `length` units can be filled with any number of
    non-overlapping tiles of lengths 2, 3 and 4 (the remaining cells stay empty),
    using dynamic programming over row lengths.
    """
    ways_number = [1] * (length + 1)

    for row_length in range(length + 1):
        for tile_length in range(2, 5):
            for tile_start in range(row_length - tile_length + 1):
                ways_number[row_length] += ways_number[row_length - tile_start - tile_length]

    return ways_number[length]


if __name__ == "__main__":
    print(f"{solution() = }")
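    # Extra hand-checked case (added for illustration; not in the original file):
    # a row of length 3 can be left empty, take a length-2 tile at offset 0 or 1,
    # or a single length-3 tile -> 4 ways.
    assert solution(3) == 4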
from __future__ import annotations


def maximum_non_adjacent_sum(nums: list[int]) -> int:
    """
    Find the maximum sum of non-adjacent elements of `nums`.

    >>> maximum_non_adjacent_sum([1, 2, 3])
    4
    >>> maximum_non_adjacent_sum([1, 5, 3, 7, 2, 2, 6])
    18
    """
    if not nums:
        return 0
    max_including = nums[0]
    max_excluding = 0
    for num in nums[1:]:
        max_including, max_excluding = (
            max_excluding + num,
            max(max_including, max_excluding),
        )
    return max(max_including, max_excluding)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional

import evaluate
import numpy as np
import torch
from datasets import load_dataset
from PIL import Image
from torchvision.transforms import (
    CenterCrop,
    Compose,
    Normalize,
    RandomHorizontalFlip,
    RandomResizedCrop,
    Resize,
    ToTensor,
)

import transformers
from transformers import (
    MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
    AutoConfig,
    AutoImageProcessor,
    AutoModelForImageClassification,
    HfArgumentParser,
    Trainer,
    TrainingArguments,
    set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version


logger = logging.getLogger(__name__)

# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.31.0")

require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/image-classification/requirements.txt")

MODEL_CONFIG_CLASSES = list(MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)


def pil_loader(path: str):
    with open(path, "rb") as f:
        im = Image.open(f)
        return im.convert("RGB")
@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    dataset_name: Optional[str] = field(
        default=None,
        metadata={
            "help": "Name of a dataset from the hub (could be your own, possibly private dataset hosted on the hub)."
        },
    )
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
    )
    train_dir: Optional[str] = field(default=None, metadata={"help": "A folder containing the training data."})
    validation_dir: Optional[str] = field(default=None, metadata={"help": "A folder containing the validation data."})
    train_val_split: Optional[float] = field(
        default=0.15, metadata={"help": "Percent to split off of train for validation."}
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )

    def __post_init__(self):
        if self.dataset_name is None and (self.train_dir is None and self.validation_dir is None):
            raise ValueError(
                "You must specify either a dataset name from the hub or a train and/or validation directory."
            )
@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
    """

    model_name_or_path: str = field(
        default="google/vit-base-patch16-224-in21k",
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"},
    )
    model_type: Optional[str] = field(
        default=None,
        metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(MODEL_TYPES)},
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from s3"}
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    image_processor_name: str = field(default=None, metadata={"help": "Name or path of preprocessor config."})
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )
    ignore_mismatched_sizes: bool = field(
        default=False,
        metadata={"help": "Will enable to load a pretrained model whose head dimensions are different."},
    )
def collate_fn(examples):
    pixel_values = torch.stack([example["pixel_values"] for example in examples])
    labels = torch.tensor([example["labels"] for example in examples])
    return {"pixel_values": pixel_values, "labels": labels}
def main():
    # See all possible arguments in src/transformers/training_args.py, or by passing
    # the --help flag to this script.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry("run_image_classification", model_args, data_args)

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )

    if training_args.should_log:
        # The default of training_args.log_level is passive, so we set log level at info here to have that default.
        transformers.utils.logging.set_verbosity_info()

    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    transformers.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()

    # Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
        + f" distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
    logger.info(f"Training/evaluation parameters {training_args}")

    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
                "Use --overwrite_output_dir to overcome."
            )
        elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
            logger.info(
                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
            )

    # Set seed before initializing model.
    set_seed(training_args.seed)

    # Initialize our dataset and prepare it for the 'image-classification' task.
    if data_args.dataset_name is not None:
        dataset = load_dataset(
            data_args.dataset_name,
            data_args.dataset_config_name,
            cache_dir=model_args.cache_dir,
            task="image-classification",
            use_auth_token=True if model_args.use_auth_token else None,
        )
    else:
        data_files = {}
        if data_args.train_dir is not None:
            data_files["train"] = os.path.join(data_args.train_dir, "**")
        if data_args.validation_dir is not None:
            data_files["validation"] = os.path.join(data_args.validation_dir, "**")
        dataset = load_dataset(
            "imagefolder",
            data_files=data_files,
            cache_dir=model_args.cache_dir,
            task="image-classification",
        )

    # If we don't have a validation split, split off a percentage of train as validation.
    data_args.train_val_split = None if "validation" in dataset.keys() else data_args.train_val_split
    if isinstance(data_args.train_val_split, float) and data_args.train_val_split > 0.0:
        split = dataset["train"].train_test_split(data_args.train_val_split)
        dataset["train"] = split["train"]
        dataset["validation"] = split["test"]

    # Prepare label mappings.
    # We'll include these in the model's config to get human readable labels in the Inference API.
    labels = dataset["train"].features["labels"].names
    label2id, id2label = {}, {}
    for i, label in enumerate(labels):
        label2id[label] = str(i)
        id2label[str(i)] = label

    # Load the accuracy metric from the datasets package
    metric = evaluate.load("accuracy")

    # Define our compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
    # predictions and label_ids field) and has to return a dictionary string to float.
    def compute_metrics(p):
        return metric.compute(predictions=np.argmax(p.predictions, axis=1), references=p.label_ids)

    config = AutoConfig.from_pretrained(
        model_args.config_name or model_args.model_name_or_path,
        num_labels=len(labels),
        label2id=label2id,
        id2label=id2label,
        finetuning_task="image-classification",
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    model = AutoModelForImageClassification.from_pretrained(
        model_args.model_name_or_path,
        from_tf=bool(".ckpt" in model_args.model_name_or_path),
        config=config,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
        ignore_mismatched_sizes=model_args.ignore_mismatched_sizes,
    )
    image_processor = AutoImageProcessor.from_pretrained(
        model_args.image_processor_name or model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )

    # Define torchvision transforms to be applied to each image.
    if "shortest_edge" in image_processor.size:
        size = image_processor.size["shortest_edge"]
    else:
        size = (image_processor.size["height"], image_processor.size["width"])
    normalize = Normalize(mean=image_processor.image_mean, std=image_processor.image_std)
    _train_transforms = Compose(
        [
            RandomResizedCrop(size),
            RandomHorizontalFlip(),
            ToTensor(),
            normalize,
        ]
    )
    _val_transforms = Compose(
        [
            Resize(size),
            CenterCrop(size),
            ToTensor(),
            normalize,
        ]
    )

    def train_transforms(example_batch):
        """Apply _train_transforms across a batch."""
        example_batch["pixel_values"] = [
            _train_transforms(pil_img.convert("RGB")) for pil_img in example_batch["image"]
        ]
        return example_batch

    def val_transforms(example_batch):
        """Apply _val_transforms across a batch."""
        example_batch["pixel_values"] = [_val_transforms(pil_img.convert("RGB")) for pil_img in example_batch["image"]]
        return example_batch

    if training_args.do_train:
        if "train" not in dataset:
            raise ValueError("--do_train requires a train dataset")
        if data_args.max_train_samples is not None:
            dataset["train"] = (
                dataset["train"].shuffle(seed=training_args.seed).select(range(data_args.max_train_samples))
            )
        # Set the training transforms
        dataset["train"].set_transform(train_transforms)

    if training_args.do_eval:
        if "validation" not in dataset:
            raise ValueError("--do_eval requires a validation dataset")
        if data_args.max_eval_samples is not None:
            dataset["validation"] = (
                dataset["validation"].shuffle(seed=training_args.seed).select(range(data_args.max_eval_samples))
            )
        # Set the validation transforms
        dataset["validation"].set_transform(val_transforms)

    # Initialize our trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=dataset["train"] if training_args.do_train else None,
        eval_dataset=dataset["validation"] if training_args.do_eval else None,
        compute_metrics=compute_metrics,
        tokenizer=image_processor,
        data_collator=collate_fn,
    )

    # Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()
        trainer.log_metrics("train", train_result.metrics)
        trainer.save_metrics("train", train_result.metrics)
        trainer.save_state()

    # Evaluation
    if training_args.do_eval:
        metrics = trainer.evaluate()
        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)

    # Write model card and (optionally) push to hub
    kwargs = {
        "finetuned_from": model_args.model_name_or_path,
        "tasks": "image-classification",
        "dataset": data_args.dataset_name,
        "tags": ["image-classification", "vision"],
    }
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
    else:
        trainer.create_model_card(**kwargs)


if __name__ == "__main__":
    main()
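
# Example programmatic invocation (added for illustration; not part of the original
# script). The `beans` dataset name and the hyperparameter values are assumptions:
def _example_invocation():
    sys.argv = [
        "run_image_classification.py",
        "--dataset_name", "beans",
        "--model_name_or_path", "google/vit-base-patch16-224-in21k",
        "--output_dir", "/tmp/vit-beans",
        "--do_train",
        "--do_eval",
        "--remove_unused_columns", "False",
        "--num_train_epochs", "1",
    ]
    main()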
import collections
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple

from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging


logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "prophetnet.tokenizer"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "microsoft/xprophetnet-large-wiki100-cased": (
            "https://huggingface.co/microsoft/xprophetnet-large-wiki100-cased/resolve/main/prophetnet.tokenizer"
        ),
    }
}

PRETRAINED_INIT_CONFIGURATION = {
    "microsoft/xprophetnet-large-wiki100-cased": {"do_lower_case": False},
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "microsoft/xprophetnet-large-wiki100-cased": 512,
}


def load_vocab(vocab_file):
    """Loads a vocabulary file into a dictionary."""
    vocab = collections.OrderedDict()
    with open(vocab_file, "r", encoding="utf-8") as reader:
        tokens = reader.readlines()
    for index, token in enumerate(tokens):
        token = token.rstrip("\n")
        vocab[token] = index
    return vocab
class XLMProphetNetTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        bos_token="[SEP]",
        eos_token="[SEP]",
        sep_token="[SEP]",
        unk_token="[UNK]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ):
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            unk_token=unk_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        try:
            import sentencepiece as spm
        except ImportError:
            logger.warning(
                "You need to install SentencePiece to use XLMRobertaTokenizer: https://github.com/google/sentencepiece"
                " pip install sentencepiece"
            )
            raise

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.vocab_file = vocab_file

        # Original fairseq vocab and spm vocab must be "aligned":
        # Vocab    |    0    |    1    |   2    |    3    |  4  |  5  |  6  |   7   |   8   |  9
        # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
        # fairseq  | '<s>'   | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's'   | '▁de' | '-'
        # spm      | '<unk>' | '<s>'   | '</s>' | ','     | '.' | '▁' | 's' | '▁de' | '-'   | '▁a'

        # put special tokens and [unused] tokens into the vocab
        self.fairseq_tokens_to_ids = {"[PAD]": 0, "[CLS]": 1, "[SEP]": 2, "[UNK]": 3, "[MASK]": 4}

        for i in range(10):
            tok = f"[unused{i}]"
            self.fairseq_tokens_to_ids[tok] = 5 + i

        # The first "real" token "," has position 15 in the embedding vocab and position 3 in the spm vocab
        self.fairseq_offset = 12
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
        for k in self.fairseq_tokens_to_ids.keys():
            self.unique_no_split_tokens.append(k)

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        try:
            import sentencepiece as spm
        except ImportError:
            logger.warning(
                "You need to install SentencePiece to use XLMRobertaTokenizer: https://github.com/google/sentencepiece"
                " pip install sentencepiece"
            )
            raise

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is None:
            return ([0] * len(token_ids_0)) + [1]
        return ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]

        if token_ids_1 is None:
            return len(token_ids_0 + sep) * [0]
        return len(token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    @property
    def vocab_size(self):
        return len(self.sp_model) + self.fairseq_offset

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text):
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token)
        # Need to return unknown token if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset)

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (strings of sub-words) to a single string."""
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        if token_ids_1 is None:
            return token_ids_0 + [self.sep_token_id]
        sep = [self.sep_token_id]
        return token_ids_0 + sep + token_ids_1 + sep
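
# Usage sketch (added for illustration; not part of the original file). Downloads the
# SentencePiece model from the Hub on first use:
if __name__ == "__main__":
    tok = XLMProphetNetTokenizer.from_pretrained("microsoft/xprophetnet-large-wiki100-cased")
    ids = tok("Hello world").input_ids  # the [SEP] id is appended by build_inputs_with_special_tokens
    print(tok.convert_ids_to_tokens(ids))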
"""simple docstring"""
def A__ ( UpperCamelCase ):
assert (
isinstance(UpperCamelCase , UpperCamelCase ) and number_of_steps > 0
), F"number_of_steps needs to be positive integer, your input {number_of_steps}"
if number_of_steps == 1:
return 1
A, A = 1, 1
for _ in range(number_of_steps - 1 ):
A, A = current + previous, current
return current
if __name__ == "__main__":
import doctest
doctest.testmod()
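    # Extra check (added for illustration; not in the original file): the counts
    # follow the Fibonacci sequence, e.g. 10 steps -> 89 ways.
    assert climb_stairs(10) == 89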
"""simple docstring"""
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_gpta import GPTaTokenizer
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
_snake_case : List[Any] = logging.get_logger(__name__)
_snake_case : Optional[int] = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_file': 'tokenizer.json'}
_snake_case : Optional[int] = {
'vocab_file': {
'gpt2': 'https://huggingface.co/gpt2/resolve/main/vocab.json',
'gpt2-medium': 'https://huggingface.co/gpt2-medium/resolve/main/vocab.json',
'gpt2-large': 'https://huggingface.co/gpt2-large/resolve/main/vocab.json',
'gpt2-xl': 'https://huggingface.co/gpt2-xl/resolve/main/vocab.json',
'distilgpt2': 'https://huggingface.co/distilgpt2/resolve/main/vocab.json',
},
'merges_file': {
'gpt2': 'https://huggingface.co/gpt2/resolve/main/merges.txt',
'gpt2-medium': 'https://huggingface.co/gpt2-medium/resolve/main/merges.txt',
'gpt2-large': 'https://huggingface.co/gpt2-large/resolve/main/merges.txt',
'gpt2-xl': 'https://huggingface.co/gpt2-xl/resolve/main/merges.txt',
'distilgpt2': 'https://huggingface.co/distilgpt2/resolve/main/merges.txt',
},
'tokenizer_file': {
'gpt2': 'https://huggingface.co/gpt2/resolve/main/tokenizer.json',
'gpt2-medium': 'https://huggingface.co/gpt2-medium/resolve/main/tokenizer.json',
'gpt2-large': 'https://huggingface.co/gpt2-large/resolve/main/tokenizer.json',
'gpt2-xl': 'https://huggingface.co/gpt2-xl/resolve/main/tokenizer.json',
'distilgpt2': 'https://huggingface.co/distilgpt2/resolve/main/tokenizer.json',
},
}
_snake_case : Union[str, Any] = {
'gpt2': 1024,
'gpt2-medium': 1024,
'gpt2-large': 1024,
'gpt2-xl': 1024,
'distilgpt2': 1024,
}
class _UpperCAmelCase ( lowercase_ ):
UpperCamelCase = VOCAB_FILES_NAMES
UpperCamelCase = PRETRAINED_VOCAB_FILES_MAP
UpperCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCamelCase = ['''input_ids''', '''attention_mask''']
UpperCamelCase = GPTaTokenizer
def __init__( self :Optional[Any] , __UpperCamelCase :Optional[int]=None , __UpperCamelCase :Dict=None , __UpperCamelCase :Optional[Any]=None , __UpperCamelCase :str="<|endoftext|>" , __UpperCamelCase :Tuple="<|endoftext|>" , __UpperCamelCase :Dict="<|endoftext|>" , __UpperCamelCase :Union[str, Any]=False , **__UpperCamelCase :Union[str, Any] , ):
super().__init__(
__UpperCamelCase , __UpperCamelCase , tokenizer_file=__UpperCamelCase , unk_token=__UpperCamelCase , bos_token=__UpperCamelCase , eos_token=__UpperCamelCase , add_prefix_space=__UpperCamelCase , **__UpperCamelCase , )
A = kwargs.pop("add_bos_token" , __UpperCamelCase )
A = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get("add_prefix_space" , __UpperCamelCase ) != add_prefix_space:
A = getattr(__UpperCamelCase , pre_tok_state.pop("type" ) )
A = add_prefix_space
A = pre_tok_class(**__UpperCamelCase )
A = add_prefix_space
def lowerCamelCase ( self :Any , *__UpperCamelCase :Optional[int] , **__UpperCamelCase :Any ):
A = kwargs.get("is_split_into_words" , __UpperCamelCase )
assert self.add_prefix_space or not is_split_into_words, (
f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
"to use it with pretokenized inputs."
)
return super()._batch_encode_plus(*__UpperCamelCase , **__UpperCamelCase )
def lowerCamelCase ( self :Dict , *__UpperCamelCase :List[str] , **__UpperCamelCase :Optional[int] ):
A = kwargs.get("is_split_into_words" , __UpperCamelCase )
assert self.add_prefix_space or not is_split_into_words, (
f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
"to use it with pretokenized inputs."
)
return super()._encode_plus(*__UpperCamelCase , **__UpperCamelCase )
def lowerCamelCase ( self :Optional[Any] , __UpperCamelCase :str , __UpperCamelCase :Optional[str] = None ):
A = self._tokenizer.model.save(__UpperCamelCase , name=__UpperCamelCase )
return tuple(__UpperCamelCase )
def lowerCamelCase ( self :Dict , __UpperCamelCase :"Conversation" ):
A = []
for is_user, text in conversation.iter_texts():
input_ids.extend(self.encode(__UpperCamelCase , add_special_tokens=__UpperCamelCase ) + [self.eos_token_id] )
if len(__UpperCamelCase ) > self.model_max_length:
A = input_ids[-self.model_max_length :]
return input_ids
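
# Usage sketch (added for illustration; not part of the original file). Fetches the
# `gpt2` tokenizer files from the Hub:
if __name__ == "__main__":
    tok = GPT2TokenizerFast.from_pretrained("gpt2")
    print(tok("Hello world").input_ids)  # expected: [15496, 995]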
def hex_to_bin(hex_num: str) -> int:
    """
    Convert a hexadecimal value to its binary equivalent, returned as an int made of
    the binary digits.

    >>> hex_to_bin("AC")
    10101100
    >>> hex_to_bin("-1a")
    -11010
    """
    hex_num = hex_num.strip()
    if not hex_num:
        raise ValueError("No value was passed to the function")

    is_negative = hex_num[0] == "-"
    if is_negative:
        hex_num = hex_num[1:]

    try:
        int_num = int(hex_num, 16)
    except ValueError:
        raise ValueError("Invalid value was passed to the function")

    # Bug fix over the original: without this guard, "0" would produce int("") below.
    if int_num == 0:
        return 0

    bin_str = ""
    while int_num > 0:
        bin_str = str(int_num % 2) + bin_str
        int_num >>= 1

    return int(("-" + bin_str) if is_negative else bin_str)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
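    # Extra checks (added for illustration; not in the original file):
    assert hex_to_bin("FF") == 11111111
    assert hex_to_bin("0") == 0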
import unittest

import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer

from diffusers import (
    AutoencoderKL,
    DDIMScheduler,
    DPMSolverMultistepScheduler,
    TextToVideoSDPipeline,
    UNet3DConditionModel,
)
from diffusers.utils import is_xformers_available, load_numpy, skip_mps, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism

from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin


enable_full_determinism()


@skip_mps
class TextToVideoSDPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = TextToVideoSDPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    # No `output_type`.
    required_optional_params = frozenset(
        [
            "num_inference_steps",
            "generator",
            "latents",
            "return_dict",
            "callback",
            "callback_steps",
        ]
    )

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet3DConditionModel(
            block_out_channels=(32, 64, 64, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "DownBlock3D"),
            up_block_types=("UpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D"),
            cross_attention_dim=32,
            attention_head_dim=4,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            clip_sample=False,
            set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
            sample_size=128,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
            hidden_act="gelu",
            projection_dim=512,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "pt",
        }
        return inputs

    def test_text_to_video_default_case(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = TextToVideoSDPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs["output_type"] = "np"
        frames = sd_pipe(**inputs).frames
        image_slice = frames[0][-3:, -3:, -1]

        assert frames[0].shape == (64, 64, 3)
        expected_slice = np.array([158.0, 160.0, 153.0, 125.0, 100.0, 121.0, 111.0, 93.0, 113.0])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(test_mean_pixel_difference=False, expected_max_diff=3e-3)

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=False, expected_max_diff=1e-2)

    @unittest.skip(reason="Batching needs to be properly figured out first for this pipeline.")
    def test_inference_batch_consistent(self):
        pass

    @unittest.skip(reason="Batching needs to be properly figured out first for this pipeline.")
    def test_inference_batch_single_identical(self):
        pass

    @unittest.skip(reason="`num_images_per_prompt` argument is not supported for this pipeline.")
    def test_num_images_per_prompt(self):
        pass

    def test_progress_bar(self):
        return super().test_progress_bar()
@slow
@skip_mps
class TextToVideoSDPipelineSlowTests(unittest.TestCase):
    def test_full_model(self):
        expected_video = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video.npy"
        )

        pipe = TextToVideoSDPipeline.from_pretrained("damo-vilab/text-to-video-ms-1.7b")
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe = pipe.to("cuda")

        prompt = "Spiderman is surfing"
        generator = torch.Generator(device="cpu").manual_seed(0)

        video_frames = pipe(prompt, generator=generator, num_inference_steps=25, output_type="pt").frames
        video = video_frames.cpu().numpy()

        assert np.abs(expected_video - video).mean() < 5e-2

    def test_two_step_model(self):
        expected_video = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video_2step.npy"
        )

        pipe = TextToVideoSDPipeline.from_pretrained("damo-vilab/text-to-video-ms-1.7b")
        pipe = pipe.to("cuda")

        prompt = "Spiderman is surfing"
        generator = torch.Generator(device="cpu").manual_seed(0)

        video_frames = pipe(prompt, generator=generator, num_inference_steps=2, output_type="pt").frames
        video = video_frames.cpu().numpy()

        assert np.abs(expected_video - video).mean() < 5e-2
from __future__ import annotations

import requests


def get_hackernews_story(story_id: str) -> dict:
    url = f"https://hacker-news.firebaseio.com/v0/item/{story_id}.json?print=pretty"
    return requests.get(url).json()


def hackernews_top_stories(max_stories: int = 10) -> list[dict]:
    """Get the top `max_stories` posts from HackerNews - https://news.ycombinator.com/"""
    url = "https://hacker-news.firebaseio.com/v0/topstories.json?print=pretty"
    story_ids = requests.get(url).json()[:max_stories]
    return [get_hackernews_story(story_id) for story_id in story_ids]


def hackernews_top_stories_as_markdown(max_stories: int = 10) -> str:
    stories = hackernews_top_stories(max_stories)
    return "\n".join("* [{title}]({url})".format(**story) for story in stories)


if __name__ == "__main__":
    print(hackernews_top_stories_as_markdown())
import tempfile

import torch

from diffusers import (
    DEISMultistepScheduler,
    DPMSolverMultistepScheduler,
    DPMSolverSinglestepScheduler,
    UniPCMultistepScheduler,
)

from .test_schedulers import SchedulerCommonTest


class UniPCMultistepSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (UniPCMultistepScheduler,)
    forward_default_kwargs = (("num_inference_steps", 25),)

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "solver_order": 2,
            "solver_type": "bh2",
        }

        config.update(**kwargs)
        return config

    def check_over_configs(self, time_step=0, **config):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residuals
                new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order]

            output, new_output = sample, sample
            for t in range(time_step, time_step + scheduler.config.solver_order + 1):
                output = scheduler.step(residual, t, output, **kwargs).prev_sample
                new_output = new_scheduler.step(residual, t, new_output, **kwargs).prev_sample

                assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def check_over_forward(self, time_step=0, **forward_kwargs):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)

            # copy over dummy past residuals (must be after setting timesteps)
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                # copy over dummy past residuals
                new_scheduler.set_timesteps(num_inference_steps)

                # copy over dummy past residual (must be after setting timesteps)
                new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order]

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
    def full_loop(self, scheduler=None, **config):
        # Bug fix over the original: the scheduler was unconditionally re-created after this
        # branch, discarding any scheduler passed in by the caller (e.g. from test_switch).
        if scheduler is None:
            scheduler_class = self.scheduler_classes[0]
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        return sample

    def test_step_shape(self):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)

            sample = self.dummy_sample
            residual = 0.1 * sample

            if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"):
                scheduler.set_timesteps(num_inference_steps)
            elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"):
                scheduler.num_inference_steps = num_inference_steps

            # copy over dummy past residuals (must be done after set_timesteps)
            dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]

            time_step_0 = scheduler.timesteps[5]
            time_step_1 = scheduler.timesteps[6]

            output_0 = scheduler.step(residual, time_step_0, sample, **kwargs).prev_sample
            output_1 = scheduler.step(residual, time_step_1, sample, **kwargs).prev_sample

            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)

    def test_switch(self):
        # make sure that iterating over schedulers with same config names gives same results
        # for defaults
        scheduler = UniPCMultistepScheduler(**self.get_scheduler_config())
        sample = self.full_loop(scheduler=scheduler)
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.2464) < 1e-3

        scheduler = DPMSolverSinglestepScheduler.from_config(scheduler.config)
        scheduler = DEISMultistepScheduler.from_config(scheduler.config)
        scheduler = DPMSolverMultistepScheduler.from_config(scheduler.config)
        scheduler = UniPCMultistepScheduler.from_config(scheduler.config)

        sample = self.full_loop(scheduler=scheduler)
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.2464) < 1e-3

    def test_timesteps(self):
        for timesteps in [25, 50, 100, 999, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_thresholding(self):
        self.check_over_configs(thresholding=False)
        for order in [1, 2, 3]:
            for solver_type in ["bh1", "bh2"]:
                for threshold in [0.5, 1.0, 2.0]:
                    for prediction_type in ["epsilon", "sample"]:
                        self.check_over_configs(
                            thresholding=True,
                            prediction_type=prediction_type,
                            sample_max_value=threshold,
                            solver_order=order,
                            solver_type=solver_type,
                        )

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_solver_order_and_type(self):
        for solver_type in ["bh1", "bh2"]:
            for order in [1, 2, 3]:
                for prediction_type in ["epsilon", "sample"]:
                    self.check_over_configs(
                        solver_order=order,
                        solver_type=solver_type,
                        prediction_type=prediction_type,
                    )
                    sample = self.full_loop(
                        solver_order=order,
                        solver_type=solver_type,
                        prediction_type=prediction_type,
                    )
                    assert not torch.isnan(sample).any(), "Samples have nan numbers"

    def test_lower_order_final(self):
        self.check_over_configs(lower_order_final=True)
        self.check_over_configs(lower_order_final=False)

    def test_inference_steps(self):
        for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1000]:
            self.check_over_forward(num_inference_steps=num_inference_steps, time_step=0)

    def test_full_loop_no_noise(self):
        sample = self.full_loop()
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.2464) < 1e-3

    def test_full_loop_with_v_prediction(self):
        sample = self.full_loop(prediction_type="v_prediction")
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.1014) < 1e-3

    def test_fp16_support(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(thresholding=True, dynamic_thresholding_ratio=0)
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter.half()
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        assert sample.dtype == torch.float16

    def test_unique_timesteps(self, **config):
        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)

            scheduler.set_timesteps(scheduler.config.num_train_timesteps)
            assert len(scheduler.timesteps.unique()) == scheduler.num_inference_steps
from __future__ import annotations


def bucket_sort(my_list: list) -> list:
    """Sort a list of numbers with bucket sort, using one bucket per integer value offset."""
    if len(my_list) == 0:
        return []
    min_value, max_value = min(my_list), max(my_list)
    bucket_count = int(max_value - min_value) + 1
    buckets: list[list] = [[] for _ in range(bucket_count)]

    for i in my_list:
        buckets[int(i - min_value)].append(i)

    return [v for bucket in buckets for v in sorted(bucket)]


if __name__ == "__main__":
    from doctest import testmod

    testmod()
    assert bucket_sort([4, 5, 3, 2, 1]) == [1, 2, 3, 4, 5]
    assert bucket_sort([0, 1, -10, 15, 2, -2]) == [-10, -2, 0, 1, 2, 15]
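    # Randomized check against the built-in sort (added for illustration; not in the
    # original file):
    import random

    data = [random.randint(-1000, 1000) for _ in range(100)]
    assert bucket_sort(data) == sorted(data)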
import inspect
import unittest
from transformers import DPTConfig
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MODEL_MAPPING, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTModel
from transformers.models.dpt.modeling_dpt import DPT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DPTImageProcessor
class DPTModelTester:
    def __init__(self, parent, batch_size=2, image_size=32, patch_size=16, num_channels=3, is_training=True, use_labels=True, hidden_size=32, num_hidden_layers=4, backbone_out_indices=[0, 1, 2, 3], num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, initializer_range=0.02, num_labels=3, backbone_featmap_shape=[1, 384, 24, 24], is_hybrid=True, scope=None):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.backbone_out_indices = backbone_out_indices
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.backbone_featmap_shape = backbone_featmap_shape
        self.scope = scope
        self.is_hybrid = is_hybrid
        # sequence length of DPT = num_patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)

        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        backbone_config = {
            "global_padding": "same",
            "layer_type": "bottleneck",
            "depths": [3, 4, 9],
            "out_features": ["stage1", "stage2", "stage3"],
            "embedding_dynamic_padding": True,
            "hidden_sizes": [96, 192, 384, 768],
            "num_groups": 2,
        }
        return DPTConfig(
            image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, backbone_out_indices=self.backbone_out_indices, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=False, initializer_range=self.initializer_range, is_hybrid=self.is_hybrid, backbone_config=backbone_config, backbone_featmap_shape=self.backbone_featmap_shape)

    def create_and_check_model(self, config, pixel_values, labels):
        model = DPTModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_depth_estimation(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = DPTForDepthEstimation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.predicted_depth.shape, (self.batch_size, self.image_size, self.image_size))

    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = DPTForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size, self.image_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class DPTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (DPTModel, DPTForDepthEstimation, DPTForSemanticSegmentation) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "depth-estimation": DPTForDepthEstimation,
            "feature-extraction": DPTModel,
            "image-segmentation": DPTForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = DPTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DPTConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="DPT does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_depth_estimation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_depth_estimation(*config_and_inputs)

    def test_for_semantic_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)
    def test_training(self):
        for model_class in self.all_model_classes:
            if model_class.__name__ == "DPTForDepthEstimation":
                continue

            config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
            config.return_dict = True

            if model_class in get_values(MODEL_MAPPING):
                continue

            model = model_class(config)
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_training_gradient_checkpointing(self):
        for model_class in self.all_model_classes:
            if model_class.__name__ == "DPTForDepthEstimation":
                continue

            config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
            config.use_cache = False
            config.return_dict = True

            if model_class in get_values(MODEL_MAPPING) or not model_class.supports_gradient_checkpointing:
                continue

            model = model_class(config)
            model.to(torch_device)
            model.gradient_checkpointing_enable()
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()
    def test_initialization(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        configs_no_init = _config_zero_init(config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            # Skip the check for the backbone
            backbone_params = []
            for name, module in model.named_modules():
                if module.__class__.__name__ == "DPTViTHybridEmbeddings":
                    backbone_params = [f"{name}.{key}" for key in module.state_dict().keys()]
                    break

            for name, param in model.named_parameters():
                if param.requires_grad:
                    if name in backbone_params:
                        continue
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item(),
                        [0.0, 1.0],
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in DPT_PRETRAINED_MODEL_ARCHIVE_LIST[1:]:
            model = DPTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_raise_readout_type(self):
        # We do this test only for DPTForDepthEstimation since it is the only model that uses readout_type
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        config.readout_type = "add"
        with self.assertRaises(ValueError):
            _ = DPTForDepthEstimation(config)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
@slow
class DPTModelIntegrationTest(unittest.TestCase):
    def test_inference_depth_estimation(self):
        image_processor = DPTImageProcessor.from_pretrained("Intel/dpt-hybrid-midas")
        model = DPTForDepthEstimation.from_pretrained("Intel/dpt-hybrid-midas").to(torch_device)

        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
            predicted_depth = outputs.predicted_depth

        # verify the predicted depth
        expected_shape = torch.Size((1, 384, 384))
        self.assertEqual(predicted_depth.shape, expected_shape)

        expected_slice = torch.tensor(
            [[[5.6437, 5.6146, 5.6511], [5.4371, 5.5649, 5.5958], [5.5215, 5.5184, 5.5293]]]
        ).to(torch_device)

        self.assertTrue(torch.allclose(outputs.predicted_depth[:3, :3, :3] / 100, expected_slice, atol=1e-4))
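
# Post-processing sketch for visualization (not part of the test suite): upsample
# the raw depth map back to the input resolution and normalize it to [0, 1].
# Assumes `outputs` and `image` as produced in the integration test above.
#
#   prediction = torch.nn.functional.interpolate(
#       outputs.predicted_depth.unsqueeze(1),
#       size=image.size[::-1],  # PIL size is (width, height)
#       mode="bicubic",
#       align_corners=False,
#   ).squeeze()
#   depth = (prediction - prediction.min()) / (prediction.max() - prediction.min())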
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"""configuration_electra""": ["""ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP""", """ElectraConfig""", """ElectraOnnxConfig"""],
"""tokenization_electra""": ["""ElectraTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_electra_fast"] = ["ElectraTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_electra"] = [
"""ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""ElectraForCausalLM""",
"""ElectraForMaskedLM""",
"""ElectraForMultipleChoice""",
"""ElectraForPreTraining""",
"""ElectraForQuestionAnswering""",
"""ElectraForSequenceClassification""",
"""ElectraForTokenClassification""",
"""ElectraModel""",
"""ElectraPreTrainedModel""",
"""load_tf_weights_in_electra""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_electra"] = [
"""TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFElectraForMaskedLM""",
"""TFElectraForMultipleChoice""",
"""TFElectraForPreTraining""",
"""TFElectraForQuestionAnswering""",
"""TFElectraForSequenceClassification""",
"""TFElectraForTokenClassification""",
"""TFElectraModel""",
"""TFElectraPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_electra"] = [
"""FlaxElectraForCausalLM""",
"""FlaxElectraForMaskedLM""",
"""FlaxElectraForMultipleChoice""",
"""FlaxElectraForPreTraining""",
"""FlaxElectraForQuestionAnswering""",
"""FlaxElectraForSequenceClassification""",
"""FlaxElectraForTokenClassification""",
"""FlaxElectraModel""",
"""FlaxElectraPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_electra import ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP, ElectraConfig, ElectraOnnxConfig
from .tokenization_electra import ElectraTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_electra_fast import ElectraTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_electra import (
ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
ElectraForCausalLM,
ElectraForMaskedLM,
ElectraForMultipleChoice,
ElectraForPreTraining,
ElectraForQuestionAnswering,
ElectraForSequenceClassification,
ElectraForTokenClassification,
ElectraModel,
ElectraPreTrainedModel,
load_tf_weights_in_electra,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_electra import (
TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFElectraForMaskedLM,
TFElectraForMultipleChoice,
TFElectraForPreTraining,
TFElectraForQuestionAnswering,
TFElectraForSequenceClassification,
TFElectraForTokenClassification,
TFElectraModel,
TFElectraPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_electra import (
FlaxElectraForCausalLM,
FlaxElectraForMaskedLM,
FlaxElectraForMultipleChoice,
FlaxElectraForPreTraining,
FlaxElectraForQuestionAnswering,
FlaxElectraForSequenceClassification,
FlaxElectraForTokenClassification,
FlaxElectraModel,
FlaxElectraPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
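
# The `_LazyModule` registered above keeps `import transformers.models.electra` cheap:
# framework-specific submodules are only imported on first attribute access, so e.g.
# `from transformers.models.electra import ElectraModel` triggers the torch-dependent
# import at that point (sketch of the mechanism, not extra public API).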
"""simple docstring"""
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import AutoTokenizer, BarkProcessor
from transformers.testing_utils import require_torch, slow
@require_torch
class BarkProcessorTest(unittest.TestCase):
    def setUp(self):
        self.checkpoint = "ylacombe/bark-small"
        self.tmpdirname = tempfile.mkdtemp()
        self.voice_preset = "en_speaker_1"
        self.input_string = "This is a test string"
        self.speaker_embeddings_dict_path = "speaker_embeddings_path.json"
        self.speaker_embeddings_directory = "speaker_embeddings"

    def get_tokenizer(self, **kwargs):
        return AutoTokenizer.from_pretrained(self.checkpoint, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def test_save_load_pretrained_default(self):
        tokenizer = self.get_tokenizer()
        processor = BarkProcessor(tokenizer=tokenizer)

        processor.save_pretrained(self.tmpdirname)
        processor = BarkProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab())

    @slow
    def test_save_load_pretrained_additional_features(self):
        processor = BarkProcessor.from_pretrained(
            pretrained_processor_name_or_path=self.checkpoint,
            speaker_embeddings_dict_path=self.speaker_embeddings_dict_path,
        )
        processor.save_pretrained(
            self.tmpdirname,
            speaker_embeddings_dict_path=self.speaker_embeddings_dict_path,
            speaker_embeddings_directory=self.speaker_embeddings_directory,
        )

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")

        processor = BarkProcessor.from_pretrained(
            self.tmpdirname,
            self.speaker_embeddings_dict_path,
            bos_token="(BOS)",
            eos_token="(EOS)",
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
    def test_speaker_embeddings(self):
        processor = BarkProcessor.from_pretrained(
            pretrained_processor_name_or_path=self.checkpoint,
            speaker_embeddings_dict_path=self.speaker_embeddings_dict_path,
        )

        seq_len = 35
        nb_codebooks_coarse = 2
        nb_codebooks_total = 8

        voice_preset = {
            "semantic_prompt": np.ones(seq_len),
            "coarse_prompt": np.ones((nb_codebooks_coarse, seq_len)),
            "fine_prompt": np.ones((nb_codebooks_total, seq_len)),
        }

        # test providing already loaded voice_preset
        inputs = processor(text=self.input_string, voice_preset=voice_preset)

        processed_voice_preset = inputs["history_prompt"]
        for key in voice_preset:
            self.assertListEqual(voice_preset[key].tolist(), processed_voice_preset.get(key, np.array([])).tolist())

        # test loading voice preset from npz file
        tmpfilename = os.path.join(self.tmpdirname, "file.npz")
        np.savez(tmpfilename, **voice_preset)
        inputs = processor(text=self.input_string, voice_preset=tmpfilename)
        processed_voice_preset = inputs["history_prompt"]

        for key in voice_preset:
            self.assertListEqual(voice_preset[key].tolist(), processed_voice_preset.get(key, np.array([])).tolist())

        # test loading voice preset from the hub
        inputs = processor(text=self.input_string, voice_preset=self.voice_preset)

    def test_tokenizer(self):
        tokenizer = self.get_tokenizer()
        processor = BarkProcessor(tokenizer=tokenizer)

        encoded_processor = processor(text=self.input_string)

        encoded_tok = tokenizer(
            self.input_string,
            padding="max_length",
            max_length=256,
            add_special_tokens=False,
            return_attention_mask=True,
            return_token_type_ids=False,
        )

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key].squeeze().tolist())
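
# Usage sketch (mirrors the tests above; the checkpoint name is the one the tests use):
#
#   processor = BarkProcessor.from_pretrained("ylacombe/bark-small")
#   inputs = processor("This is a test string", voice_preset="en_speaker_1")
#   # inputs["history_prompt"] then carries the semantic/coarse/fine prompt arrays.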
"""simple docstring"""
import warnings
from transformers import AutoTokenizer
from transformers.utils import is_torch_available
from transformers.utils.generic import ExplicitEnum
from ...processing_utils import ProcessorMixin
if is_torch_available():
import torch
class DecodeType(ExplicitEnum):
    CHARACTER = "char"
    BPE = "bpe"
    WORDPIECE = "wp"


SUPPORTED_ANNOTATION_FORMATS = (DecodeType.CHARACTER, DecodeType.BPE, DecodeType.WORDPIECE)


class MgpstrProcessor(ProcessorMixin):
    attributes = ["image_processor", "char_tokenizer"]

    image_processor_class = "ViTImageProcessor"
    char_tokenizer_class = "MgpstrTokenizer"

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        self.char_tokenizer = tokenizer
        self.bpe_tokenizer = AutoTokenizer.from_pretrained("gpt2")
        self.wp_tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")

        super().__init__(image_processor, tokenizer)
    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        if images is None and text is None:
            raise ValueError("You need to specify either an `images` or `text` input to process.")

        if images is not None:
            inputs = self.image_processor(images, return_tensors=return_tensors, **kwargs)
        if text is not None:
            encodings = self.char_tokenizer(text, return_tensors=return_tensors, **kwargs)

        if text is None:
            return inputs
        elif images is None:
            return encodings
        else:
            inputs["labels"] = encodings["input_ids"]
            return inputs
    def batch_decode(self, sequences):
        char_preds, bpe_preds, wp_preds = sequences
        batch_size = char_preds.size(0)

        char_strs, char_scores = self._decode_helper(char_preds, "char")
        bpe_strs, bpe_scores = self._decode_helper(bpe_preds, "bpe")
        wp_strs, wp_scores = self._decode_helper(wp_preds, "wp")

        final_strs = []
        final_scores = []
        for i in range(batch_size):
            scores = [char_scores[i], bpe_scores[i], wp_scores[i]]
            strs = [char_strs[i], bpe_strs[i], wp_strs[i]]
            max_score_index = scores.index(max(scores))
            final_strs.append(strs[max_score_index])
            final_scores.append(scores[max_score_index])

        out = {}
        out["generated_text"] = final_strs
        out["scores"] = final_scores
        out["char_preds"] = char_strs
        out["bpe_preds"] = bpe_strs
        out["wp_preds"] = wp_strs
        return out

    def _decode_helper(self, pred_logits, format):
        if format == DecodeType.CHARACTER:
            decoder = self.char_decode
            eos_token = 1  # unused [s] token
            eos_str = "[s]"
        elif format == DecodeType.BPE:
            decoder = self.bpe_decode
            eos_token = 2  # unused end-of-sentence token
            eos_str = "#"
        elif format == DecodeType.WORDPIECE:
            decoder = self.wp_decode
            eos_token = 102  # unused [SEP] token
            eos_str = "[SEP]"
        else:
            raise ValueError(f"Format {format} is not supported.")

        dec_strs, conf_scores = [], []
        batch_size = pred_logits.size(0)
        batch_max_length = pred_logits.size(1)
        _, preds_index = pred_logits.topk(1, dim=-1, largest=True, sorted=True)
        preds_index = preds_index.view(-1, batch_max_length)[:, 1:]
        preds_str = decoder(preds_index)
        preds_max_prob, _ = torch.nn.functional.softmax(pred_logits, dim=2).max(dim=2)
        preds_max_prob = preds_max_prob[:, 1:]

        for index in range(batch_size):
            pred_eos = preds_str[index].find(eos_str)
            pred = preds_str[index][:pred_eos]
            pred_index = preds_index[index].cpu().tolist()
            pred_eos_index = pred_index.index(eos_token) if eos_token in pred_index else -1
            pred_max_prob = preds_max_prob[index][: pred_eos_index + 1]
            confidence_score = pred_max_prob.cumprod(dim=0)[-1] if pred_max_prob.nelement() != 0 else 0.0
            dec_strs.append(pred)
            conf_scores.append(confidence_score)

        return dec_strs, conf_scores

    def char_decode(self, sequences):
        decode_strs = [seq.replace(" ", "") for seq in self.char_tokenizer.batch_decode(sequences)]
        return decode_strs

    def bpe_decode(self, sequences):
        return self.bpe_tokenizer.batch_decode(sequences)

    def wp_decode(self, sequences):
        decode_strs = [seq.replace(" ", "") for seq in self.wp_tokenizer.batch_decode(sequences)]
        return decode_strs
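
# Usage sketch (hypothetical logits; shapes follow the decode logic above, i.e.
# each head produces (batch, sequence_length, vocab_size) scores):
#
#   out = processor.batch_decode((char_logits, bpe_logits, wp_logits))
#   out["generated_text"]  # per-sample decode from the highest-confidence head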
import os
def solution():
    """Returns the total of all the name scores in the file."""
    with open(os.path.dirname(__file__) + "/p022_names.txt") as file:
        names = str(file.readlines()[0])
        names = names.replace('"', "").split(",")

    names.sort()

    name_score = 0
    total_score = 0

    for i, name in enumerate(names):
        for letter in name:
            name_score += ord(letter) - 64

        total_score += (i + 1) * name_score
        name_score = 0

    return total_score
if __name__ == "__main__":
print(solution())
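    # Worked example from the Project Euler 22 statement: COLIN has a word value of
    # 3 + 15 + 12 + 9 + 14 = 53 and is the 938th name alphabetically, so it
    # contributes 938 * 53 = 49714 to the total.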
'''simple docstring'''
import gc
import threading
import time
import psutil
import torch
class PeakCPUMemory:
    def __init__(self):
        self.process = psutil.Process()
        self.peak_monitoring = False

    def peak_monitor(self):
        self.cpu_memory_peak = -1

        while True:
            self.cpu_memory_peak = max(self.process.memory_info().rss, self.cpu_memory_peak)

            # can't sleep or will not catch the peak right (this comment is here on purpose)
            if not self.peak_monitoring:
                break

    def start(self):
        self.peak_monitoring = True
        self.thread = threading.Thread(target=self.peak_monitor)
        self.thread.daemon = True
        self.thread.start()

    def stop(self):
        self.peak_monitoring = False
        self.thread.join()
        return self.cpu_memory_peak


cpu_peak_tracker = PeakCPUMemory()


def start_measure():
    # Time
    measures = {"time": time.time()}

    gc.collect()
    torch.cuda.empty_cache()

    # CPU mem
    measures["cpu"] = psutil.Process().memory_info().rss
    cpu_peak_tracker.start()

    # GPU mem
    for i in range(torch.cuda.device_count()):
        measures[str(i)] = torch.cuda.memory_allocated(i)
    torch.cuda.reset_peak_memory_stats()

    return measures


def end_measure(start_measures):
    # Time
    measures = {"time": time.time() - start_measures["time"]}

    gc.collect()
    torch.cuda.empty_cache()

    # CPU mem
    measures["cpu"] = (psutil.Process().memory_info().rss - start_measures["cpu"]) / 2**20
    measures["cpu-peak"] = (cpu_peak_tracker.stop() - start_measures["cpu"]) / 2**20

    # GPU mem
    for i in range(torch.cuda.device_count()):
        measures[str(i)] = (torch.cuda.memory_allocated(i) - start_measures[str(i)]) / 2**20
        measures[f"{i}-peak"] = (torch.cuda.max_memory_allocated(i) - start_measures[str(i)]) / 2**20

    return measures


def log_measures(measures, description):
    print(f"{description}:")
    print(f"- Time: {measures['time']:.2f}s")
    for i in range(torch.cuda.device_count()):
        print(f"- GPU {i} allocated: {measures[str(i)]:.2f}MiB")
        peak = measures[f"{i}-peak"]
        print(f"- GPU {i} peak: {peak:.2f}MiB")
    print(f"- CPU RAM allocated: {measures['cpu']:.2f}MiB")
    print(f"- CPU RAM peak: {measures['cpu-peak']:.2f}MiB")
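
# Usage sketch:
#
#   start = start_measure()
#   ...  # run the workload to profile
#   measures = end_measure(start)
#   log_measures(measures, "workload")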
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_nllb import NllbTokenizer
else:
    NllbTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'facebook/nllb-200-distilled-600M': (
'https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/sentencepiece.bpe.model'
),
},
'tokenizer_file': {
'facebook/nllb-200-distilled-600M': (
'https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/tokenizer.json'
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/nllb-large-en-ro": 1024,
    "facebook/nllb-200-distilled-600M": 1024,
}
# fmt: off
FAIRSEQ_LANGUAGE_CODES = ['ace_Arab', 'ace_Latn', 'acm_Arab', 'acq_Arab', 'aeb_Arab', 'afr_Latn', 'ajp_Arab', 'aka_Latn', 'amh_Ethi', 'apc_Arab', 'arb_Arab', 'ars_Arab', 'ary_Arab', 'arz_Arab', 'asm_Beng', 'ast_Latn', 'awa_Deva', 'ayr_Latn', 'azb_Arab', 'azj_Latn', 'bak_Cyrl', 'bam_Latn', 'ban_Latn', 'bel_Cyrl', 'bem_Latn', 'ben_Beng', 'bho_Deva', 'bjn_Arab', 'bjn_Latn', 'bod_Tibt', 'bos_Latn', 'bug_Latn', 'bul_Cyrl', 'cat_Latn', 'ceb_Latn', 'ces_Latn', 'cjk_Latn', 'ckb_Arab', 'crh_Latn', 'cym_Latn', 'dan_Latn', 'deu_Latn', 'dik_Latn', 'dyu_Latn', 'dzo_Tibt', 'ell_Grek', 'eng_Latn', 'epo_Latn', 'est_Latn', 'eus_Latn', 'ewe_Latn', 'fao_Latn', 'pes_Arab', 'fij_Latn', 'fin_Latn', 'fon_Latn', 'fra_Latn', 'fur_Latn', 'fuv_Latn', 'gla_Latn', 'gle_Latn', 'glg_Latn', 'grn_Latn', 'guj_Gujr', 'hat_Latn', 'hau_Latn', 'heb_Hebr', 'hin_Deva', 'hne_Deva', 'hrv_Latn', 'hun_Latn', 'hye_Armn', 'ibo_Latn', 'ilo_Latn', 'ind_Latn', 'isl_Latn', 'ita_Latn', 'jav_Latn', 'jpn_Jpan', 'kab_Latn', 'kac_Latn', 'kam_Latn', 'kan_Knda', 'kas_Arab', 'kas_Deva', 'kat_Geor', 'knc_Arab', 'knc_Latn', 'kaz_Cyrl', 'kbp_Latn', 'kea_Latn', 'khm_Khmr', 'kik_Latn', 'kin_Latn', 'kir_Cyrl', 'kmb_Latn', 'kon_Latn', 'kor_Hang', 'kmr_Latn', 'lao_Laoo', 'lvs_Latn', 'lij_Latn', 'lim_Latn', 'lin_Latn', 'lit_Latn', 'lmo_Latn', 'ltg_Latn', 'ltz_Latn', 'lua_Latn', 'lug_Latn', 'luo_Latn', 'lus_Latn', 'mag_Deva', 'mai_Deva', 'mal_Mlym', 'mar_Deva', 'min_Latn', 'mkd_Cyrl', 'plt_Latn', 'mlt_Latn', 'mni_Beng', 'khk_Cyrl', 'mos_Latn', 'mri_Latn', 'zsm_Latn', 'mya_Mymr', 'nld_Latn', 'nno_Latn', 'nob_Latn', 'npi_Deva', 'nso_Latn', 'nus_Latn', 'nya_Latn', 'oci_Latn', 'gaz_Latn', 'ory_Orya', 'pag_Latn', 'pan_Guru', 'pap_Latn', 'pol_Latn', 'por_Latn', 'prs_Arab', 'pbt_Arab', 'quy_Latn', 'ron_Latn', 'run_Latn', 'rus_Cyrl', 'sag_Latn', 'san_Deva', 'sat_Beng', 'scn_Latn', 'shn_Mymr', 'sin_Sinh', 'slk_Latn', 'slv_Latn', 'smo_Latn', 'sna_Latn', 'snd_Arab', 'som_Latn', 'sot_Latn', 'spa_Latn', 'als_Latn', 'srd_Latn', 'srp_Cyrl', 'ssw_Latn', 'sun_Latn', 'swe_Latn', 'swh_Latn', 'szl_Latn', 'tam_Taml', 'tat_Cyrl', 'tel_Telu', 'tgk_Cyrl', 'tgl_Latn', 'tha_Thai', 'tir_Ethi', 'taq_Latn', 'taq_Tfng', 'tpi_Latn', 'tsn_Latn', 'tso_Latn', 'tuk_Latn', 'tum_Latn', 'tur_Latn', 'twi_Latn', 'tzm_Tfng', 'uig_Arab', 'ukr_Cyrl', 'umb_Latn', 'urd_Arab', 'uzn_Latn', 'vec_Latn', 'vie_Latn', 'war_Latn', 'wol_Latn', 'xho_Latn', 'ydd_Hebr', 'yor_Latn', 'yue_Hant', 'zho_Hans', 'zho_Hant', 'zul_Latn']
class NllbTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = NllbTokenizer

    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []

    def __init__(self, vocab_file=None, tokenizer_file=None, bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", src_lang=None, tgt_lang=None, additional_special_tokens=None, legacy_behaviour=False, **kwargs):
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        self.legacy_behaviour = legacy_behaviour
        super().__init__(
            vocab_file=vocab_file, tokenizer_file=tokenizer_file, bos_token=bos_token, eos_token=eos_token, sep_token=sep_token, cls_token=cls_token, unk_token=unk_token, pad_token=pad_token, mask_token=mask_token, src_lang=src_lang, tgt_lang=tgt_lang, additional_special_tokens=additional_special_tokens, legacy_behaviour=legacy_behaviour, **kwargs)

        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

        _additional_special_tokens = FAIRSEQ_LANGUAGE_CODES.copy()

        if additional_special_tokens is not None:
            # Only add those special tokens if they are not already there.
            _additional_special_tokens.extend(
                [t for t in additional_special_tokens if t not in _additional_special_tokens])

        self.add_special_tokens({'additional_special_tokens': _additional_special_tokens})
        self.lang_code_to_id = {
            lang_code: self.convert_tokens_to_ids(lang_code) for lang_code in FAIRSEQ_LANGUAGE_CODES
        }

        self._src_lang = src_lang if src_lang is not None else 'eng_Latn'
        self.cur_lang_code = self.convert_tokens_to_ids(self._src_lang)
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self._src_lang)
    @property
    def src_lang(self) -> str:
        return self._src_lang

    @src_lang.setter
    def src_lang(self, new_src_lang) -> None:
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def _build_translation_inputs(self, raw_inputs, return_tensors, src_lang, tgt_lang, **extra_kwargs):
        if src_lang is None or tgt_lang is None:
            raise ValueError('Translation requires a `src_lang` and a `tgt_lang` for this model')
        self.src_lang = src_lang
        inputs = self(raw_inputs, add_special_tokens=True, return_tensors=return_tensors, **extra_kwargs)
        tgt_lang_id = self.convert_tokens_to_ids(tgt_lang)
        inputs['forced_bos_token_id'] = tgt_lang_id
        return inputs
def _lowerCamelCase (self , _a , _a = "eng_Latn" , _a = None , _a = "fra_Latn" , **_a , ) -> BatchEncoding:
lowercase_ : Dict = src_lang
lowercase_ : Optional[int] = tgt_lang
return super().prepare_seqaseq_batch(_a , _a , **_a )
def _lowerCamelCase (self ) -> List[Any]:
return self.set_src_lang_special_tokens(self.src_lang )
def _lowerCamelCase (self ) -> Tuple:
return self.set_tgt_lang_special_tokens(self.tgt_lang )
def _lowerCamelCase (self , _a ) -> None:
lowercase_ : Dict = self.convert_tokens_to_ids(_a )
if self.legacy_behaviour:
lowercase_ : Tuple = []
lowercase_ : str = [self.eos_token_id, self.cur_lang_code]
else:
lowercase_ : Union[str, Any] = [self.cur_lang_code]
lowercase_ : Union[str, Any] = [self.eos_token_id]
lowercase_ : Optional[Any] = self.convert_ids_to_tokens(self.prefix_tokens )
lowercase_ : Tuple = self.convert_ids_to_tokens(self.suffix_tokens )
lowercase_ : Optional[int] = processors.TemplateProcessing(
single=prefix_tokens_str + ['$A'] + suffix_tokens_str , pair=prefix_tokens_str + ['$A', '$B'] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
def _lowerCamelCase (self , _a ) -> None:
lowercase_ : str = self.convert_tokens_to_ids(_a )
if self.legacy_behaviour:
lowercase_ : List[Any] = []
lowercase_ : Optional[Any] = [self.eos_token_id, self.cur_lang_code]
else:
lowercase_ : Optional[int] = [self.cur_lang_code]
lowercase_ : Dict = [self.eos_token_id]
lowercase_ : Dict = self.convert_ids_to_tokens(self.prefix_tokens )
lowercase_ : Optional[Any] = self.convert_ids_to_tokens(self.suffix_tokens )
lowercase_ : List[Any] = processors.TemplateProcessing(
single=prefix_tokens_str + ['$A'] + suffix_tokens_str , pair=prefix_tokens_str + ['$A', '$B'] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '
                'tokenizer.')

        if not os.path.isdir(save_directory):
            logger.error(f'''Vocabulary path ({save_directory}) should be a directory.''')
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
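
# Usage sketch (assumes the distilled 600M checkpoint referenced above):
#
#   tokenizer = NllbTokenizerFast.from_pretrained(
#       "facebook/nllb-200-distilled-600M", src_lang="eng_Latn"
#   )
#   batch = tokenizer("Hello world", return_tensors="pt")
#   # With legacy_behaviour=False, input_ids = [eng_Latn code, ..., </s>].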
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNet1DModel
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS, UNCONDITIONAL_AUDIO_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class DanceDiffusionPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = DanceDiffusionPipeline
    params = UNCONDITIONAL_AUDIO_GENERATION_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {
        "callback",
        "latents",
        "callback_steps",
        "output_type",
        "num_images_per_prompt",
    }
    batch_params = UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS
    test_attention_slicing = False
    test_cpu_offload = False

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet1DModel(
            block_out_channels=(32, 32, 64),
            extra_in_channels=16,
            sample_size=512,
            sample_rate=16000,
            in_channels=2,
            out_channels=2,
            flip_sin_to_cos=True,
            use_timestep_embedding=False,
            time_embedding_type="fourier",
            mid_block_type="UNetMidBlock1D",
            down_block_types=("DownBlock1DNoSkip", "DownBlock1D", "AttnDownBlock1D"),
            up_block_types=("AttnUpBlock1D", "UpBlock1D", "UpBlock1DNoSkip"),
        )
        scheduler = IPNDMScheduler()

        components = {
            "unet": unet,
            "scheduler": scheduler,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "batch_size": 1,
            "generator": generator,
            "num_inference_steps": 4,
        }
        return inputs

    def test_dance_diffusion(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        pipe = DanceDiffusionPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        output = pipe(**inputs)
        audio = output.audios

        audio_slice = audio[0, -3:, -3:]

        assert audio.shape == (1, 2, components["unet"].sample_size)
        expected_slice = np.array([-0.7265, 1.0000, -0.8388, 0.1175, 0.9498, -1.0000])
        assert np.abs(audio_slice.flatten() - expected_slice).max() < 1e-2

    @skip_mps
    def test_save_load_local(self):
        return super().test_save_load_local()

    @skip_mps
    def test_dict_tuple_outputs_equivalent(self):
        return super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3)

    @skip_mps
    def test_save_load_optional_components(self):
        return super().test_save_load_optional_components()

    @skip_mps
    def test_attention_slicing_forward_pass(self):
        return super().test_attention_slicing_forward_pass()

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)
@slow
@require_torch_gpu
class PipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_dance_diffusion(self):
        device = torch_device

        pipe = DanceDiffusionPipeline.from_pretrained("harmonai/maestro-150k")
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        output = pipe(generator=generator, num_inference_steps=100, audio_length_in_s=4.096)
        audio = output.audios

        audio_slice = audio[0, -3:, -3:]

        assert audio.shape == (1, 2, pipe.unet.sample_size)
        expected_slice = np.array([-0.0192, -0.0231, -0.0318, -0.0059, 0.0002, -0.0020])
        assert np.abs(audio_slice.flatten() - expected_slice).max() < 1e-2

    def test_dance_diffusion_fp16(self):
        device = torch_device

        pipe = DanceDiffusionPipeline.from_pretrained("harmonai/maestro-150k", torch_dtype=torch.float16)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        output = pipe(generator=generator, num_inference_steps=100, audio_length_in_s=4.096)
        audio = output.audios

        audio_slice = audio[0, -3:, -3:]

        assert audio.shape == (1, 2, pipe.unet.sample_size)
        expected_slice = np.array([-0.0367, -0.0488, -0.0771, -0.0525, -0.0444, -0.0341])
        assert np.abs(audio_slice.flatten() - expected_slice).max() < 1e-2
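
# The pipeline returns audio as float arrays of shape (batch, channels, samples).
# A quick way to save a result to disk (sketch; assumes scipy is available):
#
#   from scipy.io.wavfile import write
#   write("sample.wav", pipe.unet.config.sample_rate, output.audios[0].T)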
"""simple docstring"""
import unittest
from datasets import load_dataset
from transformers import BloomTokenizerFast
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class BloomTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    slow_tokenizer_class = None
    rust_tokenizer_class = BloomTokenizerFast
    tokenizer_class = BloomTokenizerFast
    test_rust_tokenizer = True
    test_slow_tokenizer = False
    from_pretrained_vocab_key = "tokenizer_file"
    special_tokens_map = {"bos_token": "<s>", "eos_token": "</s>", "unk_token": "<unk>", "pad_token": "<pad>"}

    def setUp(self):
        super().setUp()
        tokenizer = BloomTokenizerFast.from_pretrained("bigscience/tokenizer")
        tokenizer.save_pretrained(self.tmpdirname)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return BloomTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)
    def test_encode_decode(self):
        tokenizer = self.get_rust_tokenizer()

        input_sentences = ["The quick brown fox</s>", "jumps over the lazy dog</s>"]
        expected_input_ids = [[2175, 23714, 73173, 144252, 2], [77, 132619, 3478, 368, 109586, 35433, 2]]
        computed_input_ids = tokenizer.batch_encode_plus(input_sentences)["input_ids"]
        self.assertListEqual(expected_input_ids, computed_input_ids)

        decoded_sentences = tokenizer.batch_decode(computed_input_ids)
        self.assertListEqual(input_sentences, decoded_sentences)
    def test_padding(self, max_length=6):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})'):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                # tokenizer_r.pad_token = None # Hotfixing padding = None
                # Simple input
                s = "This is a simple input"
                s2 = ["This is a simple input 1", "This is a simple input 2"]
                p = ("This is a simple input", "This is a pair")
                p2 = [
                    ("This is a simple input 1", "This is a simple input 2"),
                    ("This is a simple pair 1", "This is a simple pair 2"),
                ]

                # Simple input tests
                try:
                    tokenizer_r.encode(s, max_length=max_length)
                    tokenizer_r.encode_plus(s, max_length=max_length)

                    tokenizer_r.batch_encode_plus(s2, max_length=max_length)
                    tokenizer_r.encode(p, max_length=max_length)
                    tokenizer_r.batch_encode_plus(p2, max_length=max_length)
                except ValueError:
                    self.fail("Bloom Tokenizer should be able to deal with padding")

                tokenizer_r.pad_token = None  # Hotfixing padding = None
                self.assertRaises(ValueError, tokenizer_r.encode, s, max_length=max_length, padding="max_length")

                # Simple input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, s, max_length=max_length, padding="max_length")

                # Simple input
                self.assertRaises(
                    ValueError, tokenizer_r.batch_encode_plus, s2, max_length=max_length, padding="max_length",
                )

                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode, p, max_length=max_length, padding="max_length")

                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, p, max_length=max_length, padding="max_length")

                # Pair input
                self.assertRaises(
                    ValueError, tokenizer_r.batch_encode_plus, p2, max_length=max_length, padding="max_length",
                )
    def test_encodings_from_xnli_dataset(self):
        tokenizer = self.get_rust_tokenizer()
        ds = load_dataset("xnli", "all_languages", split="test", streaming=True)

        sample_data = next(iter(ds))["premise"]  # pick up one data
        input_text = list(sample_data.values())

        output_tokens = list(map(tokenizer.encode, input_text))
        predicted_text = [tokenizer.decode(x, clean_up_tokenization_spaces=False) for x in output_tokens]
        self.assertListEqual(predicted_text, input_text)

    def test_pretrained_model_lists(self):
        self.assertGreaterEqual(len(self.tokenizer_class.pretrained_vocab_files_map), 1)
        self.assertGreaterEqual(len(list(self.tokenizer_class.pretrained_vocab_files_map.values())[0]), 1)
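
# Note: the Bloom fast tokenizer ships with a pad token, so padded batch encoding
# works out of the box (sketch):
#
#   tok = BloomTokenizerFast.from_pretrained("bigscience/tokenizer")
#   enc = tok(["short", "a much longer input"], padding=True, return_tensors="pt")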
'''simple docstring'''
import dataclasses
import json
import warnings
from dataclasses import dataclass, field
from time import time
from typing import List
from ..utils import logging
logger = logging.get_logger(__name__)


def list_field(default=None, metadata=None):
    return field(default_factory=lambda: default, metadata=metadata)
@dataclass
class BenchmarkArguments:
    models: List[str] = list_field(
        default=[],
        metadata={
            'help': (
                'Model checkpoints to be provided to the AutoModel classes. Leave blank to benchmark the base version'
                ' of all available models'
            )
        },
    )

    batch_sizes: List[int] = list_field(
        default=[8], metadata={'help': 'List of batch sizes for which memory and time performance will be evaluated'}
    )

    sequence_lengths: List[int] = list_field(
        default=[8, 32, 128, 512],
        metadata={'help': 'List of sequence lengths for which memory and time performance will be evaluated'},
    )
    inference: bool = field(
        default=True, metadata={'help': 'Whether to benchmark inference of model. Inference can be disabled via --no-inference.'}
    )
    cuda: bool = field(
        default=True, metadata={'help': 'Whether to run on available cuda devices. Cuda can be disabled via --no-cuda.'}
    )
    tpu: bool = field(
        default=True, metadata={'help': 'Whether to run on available tpu devices. TPU can be disabled via --no-tpu.'}
    )
    fp16: bool = field(default=False, metadata={'help': 'Use FP16 to accelerate inference.'})
    training: bool = field(default=False, metadata={'help': 'Benchmark training of model'})
    verbose: bool = field(default=False, metadata={'help': 'Verbose memory tracing'})
    speed: bool = field(
        default=True, metadata={'help': 'Whether to perform speed measurements. Speed measurements can be disabled via --no-speed.'}
    )
    memory: bool = field(
        default=True,
        metadata={
            'help': 'Whether to perform memory measurements. Memory measurements can be disabled via --no-memory'
        },
    )
    trace_memory_line_by_line: bool = field(default=False, metadata={'help': 'Trace memory line by line'})
    save_to_csv: bool = field(default=False, metadata={'help': 'Save result to a CSV file'})
    log_print: bool = field(default=False, metadata={'help': 'Save all print statements in a log file'})
    env_print: bool = field(default=False, metadata={'help': 'Whether to print environment information'})
    multi_process: bool = field(
        default=True,
        metadata={
            'help': (
                'Whether to use multiprocessing for memory and speed measurement. It is highly recommended to use'
                ' multiprocessing for accurate CPU and GPU memory measurements. This option should only be disabled'
                ' for debugging / testing and on TPU.'
            )
        },
    )
    inference_time_csv_file: str = field(
        default=f'''inference_time_{round(time() )}.csv''', metadata={'help': 'CSV filename used if saving time results to csv.'},
    )
    inference_memory_csv_file: str = field(
        default=f'''inference_memory_{round(time() )}.csv''', metadata={'help': 'CSV filename used if saving memory results to csv.'},
    )
    train_time_csv_file: str = field(
        default=f'''train_time_{round(time() )}.csv''', metadata={'help': 'CSV filename used if saving time results to csv for training.'},
    )
    train_memory_csv_file: str = field(
        default=f'''train_memory_{round(time() )}.csv''', metadata={'help': 'CSV filename used if saving memory results to csv for training.'},
    )
    env_info_csv_file: str = field(
        default=f'''env_info_{round(time() )}.csv''', metadata={'help': 'CSV filename used if saving environment information.'},
    )
    log_filename: str = field(
        default=f'''log_{round(time() )}.csv''', metadata={'help': 'Log filename used if print statements are saved in log.'},
    )
    repeat: int = field(default=3, metadata={'help': 'Times an experiment will be run.'})
    only_pretrain_model: bool = field(
        default=False,
        metadata={
            'help': (
                'Instead of loading the model as defined in `config.architectures` if exists, just load the pretrain'
                ' model weights.'
            )
        },
    )
    def __post_init__(self):
        warnings.warn(
            F'The class {self.__class__} is deprecated. Hugging Face Benchmarking utils'
            " are deprecated in general and it is advised to use external Benchmarking libraries "
            " to benchmark Transformer models.",
            FutureWarning,
        )

    def to_json_string(self):
        return json.dumps(dataclasses.asdict(self), indent=2)

    @property
    def model_names(self) -> List[str]:
        if len(self.models) <= 0:
            raise ValueError(
                "Please make sure you provide at least one model name / model identifier, *e.g.* `--models"
                " bert-base-cased` or `args.models = ['bert-base-cased'].")
        return self.models

    @property
    def do_multi_processing(self) -> bool:
        if not self.multi_process:
            return False
        elif self.is_tpu:
            logger.info("Multiprocessing is currently not possible on TPU.")
            return False
        else:
            return True
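
# Usage sketch with HfArgumentParser (field names as defined above):
#
#   from transformers import HfArgumentParser
#   parser = HfArgumentParser(BenchmarkArguments)
#   benchmark_args = parser.parse_args_into_dataclasses()[0]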
'''simple docstring'''
import inspect
import os
import torch
from transformers import AutoModel
from transformers.testing_utils import mockenv_context
from transformers.trainer_utils import set_seed
import accelerate
from accelerate.accelerator import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils.testing import (
AccelerateTestCase,
TempDirTestCase,
execute_subprocess_async,
require_cuda,
require_fsdp,
require_multi_gpu,
slow,
)
from accelerate.utils.constants import (
FSDP_AUTO_WRAP_POLICY,
FSDP_BACKWARD_PREFETCH,
FSDP_SHARDING_STRATEGY,
FSDP_STATE_DICT_TYPE,
)
from accelerate.utils.dataclasses import FullyShardedDataParallelPlugin
from accelerate.utils.other import patch_environment
set_seed(42)
lowerCAmelCase_ : Optional[Any] = "bert-base-cased"
lowerCAmelCase_ : Any = "fp16"
lowerCAmelCase_ : Union[str, Any] = "bf16"
lowerCAmelCase_ : List[Any] = [FPaa, BFaa]
@require_fsdp
@require_cuda
class FSDPPluginIntegration(AccelerateTestCase):
    def setUp(self):
        super().setUp()

        self.dist_env = dict(
            ACCELERATE_USE_FSDP="true", MASTER_ADDR="localhost", MASTER_PORT="10999", RANK="0", LOCAL_RANK="0", WORLD_SIZE="1",
        )

    def test_sharding_strategy(self):
        from torch.distributed.fsdp.fully_sharded_data_parallel import ShardingStrategy

        for i, strategy in enumerate(FSDP_SHARDING_STRATEGY):
            env = self.dist_env.copy()
            env["FSDP_SHARDING_STRATEGY"] = f'{i + 1}'
            env["FSDP_SHARDING_STRATEGY_NAME"] = strategy
            with mockenv_context(**env):
                fsdp_plugin = FullyShardedDataParallelPlugin()
                self.assertEqual(fsdp_plugin.sharding_strategy, ShardingStrategy(i + 1))

    def test_backward_prefetch(self):
        from torch.distributed.fsdp.fully_sharded_data_parallel import BackwardPrefetch

        for i, prefetch_policy in enumerate(FSDP_BACKWARD_PREFETCH):
            env = self.dist_env.copy()
            env["FSDP_BACKWARD_PREFETCH"] = prefetch_policy
            with mockenv_context(**env):
                fsdp_plugin = FullyShardedDataParallelPlugin()
                if prefetch_policy == "NO_PREFETCH":
                    self.assertIsNone(fsdp_plugin.backward_prefetch)
                else:
                    self.assertEqual(fsdp_plugin.backward_prefetch, BackwardPrefetch(i + 1))

    def test_state_dict_type(self):
        from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType

        for i, state_dict_type in enumerate(FSDP_STATE_DICT_TYPE):
            env = self.dist_env.copy()
            env["FSDP_STATE_DICT_TYPE"] = state_dict_type
            with mockenv_context(**env):
                fsdp_plugin = FullyShardedDataParallelPlugin()
                self.assertEqual(fsdp_plugin.state_dict_type, StateDictType(i + 1))
                if state_dict_type == "FULL_STATE_DICT":
                    self.assertTrue(fsdp_plugin.state_dict_config.offload_to_cpu)
                    self.assertTrue(fsdp_plugin.state_dict_config.rank0_only)

    def test_auto_wrap_policy(self):
        model = AutoModel.from_pretrained(BERT_BASE_CASED)
        for policy in FSDP_AUTO_WRAP_POLICY:
            env = self.dist_env.copy()
            env["FSDP_AUTO_WRAP_POLICY"] = policy
            if policy == "TRANSFORMER_BASED_WRAP":
                env["FSDP_TRANSFORMER_CLS_TO_WRAP"] = "BertLayer"
            elif policy == "SIZE_BASED_WRAP":
                env["FSDP_MIN_NUM_PARAMS"] = "2000"
            with mockenv_context(**env):
                fsdp_plugin = FullyShardedDataParallelPlugin()
                fsdp_plugin.set_auto_wrap_policy(model)
                if policy == "NO_WRAP":
                    self.assertIsNone(fsdp_plugin.auto_wrap_policy)
                else:
                    self.assertIsNotNone(fsdp_plugin.auto_wrap_policy)

        env = self.dist_env.copy()
        env["FSDP_AUTO_WRAP_POLICY"] = "TRANSFORMER_BASED_WRAP"
        env["FSDP_TRANSFORMER_CLS_TO_WRAP"] = "T5Layer"
        with mockenv_context(**env):
            fsdp_plugin = FullyShardedDataParallelPlugin()
            with self.assertRaises(Exception) as cm:
                fsdp_plugin.set_auto_wrap_policy(model)
            self.assertTrue("Could not find the transformer layer class to wrap in the model." in str(cm.exception))

        env = self.dist_env.copy()
        env["FSDP_AUTO_WRAP_POLICY"] = "SIZE_BASED_WRAP"
        env["FSDP_MIN_NUM_PARAMS"] = "0"
        with mockenv_context(**env):
            fsdp_plugin = FullyShardedDataParallelPlugin()
            fsdp_plugin.set_auto_wrap_policy(model)
            self.assertIsNone(fsdp_plugin.auto_wrap_policy)

    def test_mixed_precision(self):
        from torch.distributed.fsdp.fully_sharded_data_parallel import MixedPrecision
        from torch.distributed.fsdp.sharded_grad_scaler import ShardedGradScaler

        for mp_dtype in dtypes:
            env = self.dist_env.copy()
            env["ACCELERATE_MIXED_PRECISION"] = mp_dtype
            with mockenv_context(**env):
                accelerator = Accelerator()
                if mp_dtype == "fp16":
                    dtype = torch.float16
                elif mp_dtype == "bf16":
                    dtype = torch.bfloat16
                mp_policy = MixedPrecision(param_dtype=dtype, reduce_dtype=dtype, buffer_dtype=dtype)
                self.assertEqual(accelerator.state.fsdp_plugin.mixed_precision_policy, mp_policy)
                if mp_dtype == FP16:
                    self.assertTrue(isinstance(accelerator.scaler, ShardedGradScaler))
                elif mp_dtype == BF16:
                    self.assertIsNone(accelerator.scaler)
                AcceleratorState._reset_state(True)

    def test_cpu_offload(self):
        from torch.distributed.fsdp.fully_sharded_data_parallel import CPUOffload

        for flag in [True, False]:
            env = self.dist_env.copy()
            env["FSDP_OFFLOAD_PARAMS"] = str(flag).lower()
            with mockenv_context(**env):
                fsdp_plugin = FullyShardedDataParallelPlugin()
                self.assertEqual(fsdp_plugin.cpu_offload, CPUOffload(offload_params=flag))
@require_fsdp
@require_multi_gpu
@slow
class FSDPIntegrationTest(TempDirTestCase):
    def setUp(self):
        super().setUp()
        self.performance_lower_bound = 0.82
        self.performance_configs = [
            "fsdp_shard_grad_op_transformer_based_wrap",
            "fsdp_full_shard_transformer_based_wrap",
        ]
        self.peak_memory_usage_upper_bound = {
            "multi_gpu_fp16": 3200,
            "fsdp_shard_grad_op_transformer_based_wrap_fp16": 2000,
            "fsdp_full_shard_transformer_based_wrap_fp16": 1900,
            # Disabling below test as it overwhelms the RAM memory usage
            # on CI self-hosted runner leading to tests getting killed.
            # "fsdp_full_shard_cpu_offload_transformer_based_wrap_fp32": 1500, # fp16 was leading to indefinite hang
        }
        self.n_train = 160
        self.n_val = 160

        mod_file = inspect.getfile(accelerate.test_utils)
        self.test_scripts_folder = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "external_deps"])
    def test_performance(self):
        self.test_file_path = os.path.join(self.test_scripts_folder, "test_performance.py")
        cmd = ["accelerate", "launch", "--num_processes=2", "--num_machines=1", "--machine_rank=0", "--use_fsdp"]
        for config in self.performance_configs:
            cmd_config = cmd.copy()
            for i, strategy in enumerate(FSDP_SHARDING_STRATEGY):
if strategy.lower() in config:
cmd_config.append(F'--fsdp_sharding_strategy={i+1}')
break
if "fp32" in config:
cmd_config.append("--mixed_precision=no")
else:
cmd_config.append("--mixed_precision=fp16")
if "cpu_offload" in config:
cmd_config.append("--fsdp_offload_params=True")
for policy in FSDP_AUTO_WRAP_POLICY:
if policy.lower() in config:
cmd_config.append(F'--fsdp_auto_wrap_policy={policy}')
break
if policy == "TRANSFORMER_BASED_WRAP":
cmd_config.append("--fsdp_transformer_layer_cls_to_wrap=BertLayer")
elif policy == "SIZE_BASED_WRAP":
cmd_config.append("--fsdp_min_num_params=2000")
cmd_config.extend(
[
self.test_file_path,
F'--output_dir={self.tmpdir}',
F'--performance_lower_bound={self.performance_lower_bound}',
])
with patch_environment(omp_num_threads=1):
                execute_subprocess_async(cmd_config, env=os.environ.copy())
    def test_checkpointing(self):
        self.test_file_path = os.path.join(self.test_scripts_folder, "test_checkpointing.py")
        cmd = [
            "accelerate",
            "launch",
            "--num_processes=2",
            "--num_machines=1",
            "--machine_rank=0",
            "--use_fsdp",
            "--mixed_precision=fp16",
            "--fsdp_transformer_layer_cls_to_wrap=BertLayer",
        ]

        for i, strategy in enumerate(FSDP_SHARDING_STRATEGY):
            cmd_config = cmd.copy()
            cmd_config.append(f"--fsdp_sharding_strategy={i+1}")
            if strategy != "FULL_SHARD":
                continue
            state_dict_config_index = len(cmd_config)
            for state_dict_type in FSDP_STATE_DICT_TYPE:
                cmd_config = cmd_config[:state_dict_config_index]
                cmd_config.append(f"--fsdp_state_dict_type={state_dict_type}")
                cmd_config.extend(
                    [
                        self.test_file_path,
                        f"--output_dir={self.tmpdir}",
                        "--partial_train_epoch=1",
                    ]
                )
                with patch_environment(omp_num_threads=1):
                    execute_subprocess_async(cmd_config, env=os.environ.copy())

                cmd_config = cmd_config[:-1]
                resume_from_checkpoint = os.path.join(self.tmpdir, "epoch_0")
                cmd_config.extend(
                    [
                        f"--resume_from_checkpoint={resume_from_checkpoint}",
                    ]
                )
                with patch_environment(omp_num_threads=1):
                    execute_subprocess_async(cmd_config, env=os.environ.copy())
    def test_peak_memory_usage(self):
        self.test_file_path = os.path.join(self.test_scripts_folder, "test_peak_memory_usage.py")
        cmd = [
            "accelerate",
            "launch",
            "--num_processes=2",
            "--num_machines=1",
            "--machine_rank=0",
        ]
        for spec, peak_mem_upper_bound in self.peak_memory_usage_upper_bound.items():
            cmd_config = cmd.copy()
            if "fp16" in spec:
                cmd_config.extend(["--mixed_precision=fp16"])
            else:
                cmd_config.extend(["--mixed_precision=no"])

            if "multi_gpu" in spec:
                continue
            else:
                cmd_config.extend(["--use_fsdp"])
                for i, strategy in enumerate(FSDP_SHARDING_STRATEGY):
                    if strategy.lower() in spec:
                        cmd_config.append(f"--fsdp_sharding_strategy={i+1}")
                        break

                if "cpu_offload" in spec:
                    cmd_config.append("--fsdp_offload_params=True")

                for policy in FSDP_AUTO_WRAP_POLICY:
                    if policy.lower() in spec:
                        cmd_config.append(f"--fsdp_auto_wrap_policy={policy}")
                        break

                if policy == "TRANSFORMER_BASED_WRAP":
                    cmd_config.append("--fsdp_transformer_layer_cls_to_wrap=BertLayer")
                elif policy == "SIZE_BASED_WRAP":
                    cmd_config.append("--fsdp_min_num_params=2000")

            cmd_config.extend(
                [
                    self.test_file_path,
                    f"--output_dir={self.tmpdir}",
                    f"--peak_memory_upper_bound={peak_mem_upper_bound}",
                    f"--n_train={self.n_train}",
                    f"--n_val={self.n_val}",
                ]
            )
            with patch_environment(omp_num_threads=1):
                execute_subprocess_async(cmd_config, env=os.environ.copy())
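

# Illustrative sketch (not part of the original test module): the integration
# tests above assemble plain `accelerate launch` command lines, e.g. (values
# taken from the configs in setUp; script path assumed):
#
#   accelerate launch --num_processes=2 --num_machines=1 --machine_rank=0 \
#       --use_fsdp --mixed_precision=fp16 --fsdp_sharding_strategy=1 \
#       --fsdp_auto_wrap_policy=TRANSFORMER_BASED_WRAP \
#       --fsdp_transformer_layer_cls_to_wrap=BertLayer \
#       test_performance.py --output_dir=/tmp/fsdp --performance_lower_bound=0.82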
| 461
| 0
|
"""simple docstring"""
import copy
import re
class TrialShortNamer:
    PREFIX = "hp"
    DEFAULTS = {}
    NAMING_INFO = None

    @classmethod
    def set_defaults(cls, prefix, defaults):
        cls.PREFIX = prefix
        cls.DEFAULTS = defaults
        cls.build_naming_info()

    @staticmethod
    def shortname_for_word(info, word):
        if len(word) == 0:
            return ""
        short_word = None
        if any(char.isdigit() for char in word):
            raise Exception(f"Parameters should not contain numbers: '{word}' contains a number")
        if word in info["short_word"]:
            return info["short_word"][word]
        for prefix_len in range(1, len(word) + 1):
            prefix = word[:prefix_len]
            if prefix in info["reverse_short_word"]:
                continue
            else:
                short_word = prefix
                break

        if short_word is None:
            # Paranoid fallback
            def int_to_alphabetic(integer):
                s = ""
                while integer != 0:
                    s = chr(ord("A") + integer % 10) + s
                    integer //= 10
                return s

            i = 0
            while True:
                sword = word + "#" + int_to_alphabetic(i)
                if sword in info["reverse_short_word"]:
                    continue
                else:
                    short_word = sword
                    break

        info["short_word"][word] = short_word
        info["reverse_short_word"][short_word] = word
        return short_word

    @staticmethod
    def shortname_for_key(info, param_name):
        words = param_name.split("_")
        shortname_parts = [TrialShortNamer.shortname_for_word(info, word) for word in words]
        # We try to create a separatorless short name, but if there is a collision we have to fallback
        # to a separated short name
        separators = ["", "_"]
        for separator in separators:
            shortname = separator.join(shortname_parts)
            if shortname not in info["reverse_short_param"]:
                info["short_param"][param_name] = shortname
                info["reverse_short_param"][shortname] = param_name
                return shortname

        return param_name

    @staticmethod
    def add_new_param_name(info, param_name):
        short_name = TrialShortNamer.shortname_for_key(info, param_name)
        info["short_param"][param_name] = short_name
        info["reverse_short_param"][short_name] = param_name

    @classmethod
    def build_naming_info(cls):
        if cls.NAMING_INFO is not None:
            return

        info = {
            "short_word": {},
            "reverse_short_word": {},
            "short_param": {},
            "reverse_short_param": {},
        }

        field_keys = list(cls.DEFAULTS.keys())
        for k in field_keys:
            cls.add_new_param_name(info, k)

        cls.NAMING_INFO = info

    @classmethod
    def shortname(cls, params):
        cls.build_naming_info()
        assert cls.PREFIX is not None
        name = [copy.copy(cls.PREFIX)]

        for k, v in params.items():
            if k not in cls.DEFAULTS:
                raise Exception(f"You should provide a default value for the param name {k} with value {v}")
            if v == cls.DEFAULTS[k]:
                # The default value is not added to the name
                continue

            key = cls.NAMING_INFO["short_param"][k]

            if isinstance(v, bool):
                v = 1 if v else 0

            sep = "" if isinstance(v, (int, float)) else "-"
            name.append(f"{key}{sep}{v}")

        return "_".join(name)

    @classmethod
    def parse_repr(cls, repr):
        repr = repr[len(cls.PREFIX) + 1 :]
        if repr == "":
            values = []
        else:
            values = repr.split("_")

        parameters = {}

        for value in values:
            if "-" in value:
                p_k, p_v = value.split("-")
            else:
                p_k = re.sub("[0-9.]", "", value)
                p_v = float(re.sub("[^0-9.]", "", value))

            key = cls.NAMING_INFO["reverse_short_param"][p_k]
            parameters[key] = p_v

        for k in cls.DEFAULTS:
            if k not in parameters:
                parameters[k] = cls.DEFAULTS[k]

        return parameters
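

# Illustrative usage sketch (not part of the original module): a subclass sets
# PREFIX and DEFAULTS; `shortname` then encodes only the non-default values,
# and `parse_repr` inverts the encoding. All names below are made up.
class _DemoNamer(TrialShortNamer):
    PREFIX = "hp"
    DEFAULTS = {"learning_rate": 3e-5, "batch_size": 8}


# _DemoNamer.shortname({"learning_rate": 1e-4, "batch_size": 8})
#   -> "hp_lr0.0001" (batch_size equals its default, so it is omitted)
# _DemoNamer.parse_repr("hp_lr0.0001")
#   -> {"learning_rate": 0.0001, "batch_size": 8}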
| 163
|
"""simple docstring"""
from typing import List
from .keymap import KEYMAP, get_character
def mark(key):
    """Tags a function with a single key code so the register can route it."""

    def decorator(func):
        handle = getattr(func, "handle_key", [])
        handle += [key]
        setattr(func, "handle_key", handle)
        return func

    return decorator


def mark_multiple(*keys):
    """Tags a function with several key codes so the register can route it."""

    def decorator(func):
        handle = getattr(func, "handle_key", [])
        handle += keys
        setattr(func, "handle_key", handle)
        return func

    return decorator


class KeyHandler(type):
    def __new__(cls, name, bases, attrs):
        new_cls = super().__new__(cls, name, bases, attrs)
        if not hasattr(new_cls, "key_handler"):
            setattr(new_cls, "key_handler", {})
        setattr(new_cls, "handle_input", KeyHandler.handle_input)

        for value in attrs.values():
            handled_keys = getattr(value, "handle_key", [])
            for key in handled_keys:
                new_cls.key_handler[key] = value
        return new_cls

    @staticmethod
    def handle_input(cls):
        """Finds and returns the selected character if it exists"""
        char = get_character()
        if char != KEYMAP["undefined"]:
            char = ord(char)
        handler = cls.key_handler.get(char)
        if handler:
            cls.current_selection = char
            return handler(cls)
        else:
            return None


def register(cls):
    """Adds the KeyHandler metaclass to the class"""
    return KeyHandler(cls.__name__, cls.__bases__, cls.__dict__.copy())
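

# Illustrative usage sketch (not part of the original module): `register`
# rebuilds a class through the KeyHandler metaclass so that methods tagged
# with `mark`/`mark_multiple` become reachable via `handle_input`.
#
# @register
# class BulletMenu:
#     @mark(KEYMAP["up"])
#     def move_up(cls):
#         ...  # placeholder body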
| 163
| 1
|
import datetime
import platform
import subprocess
from typing import Optional, Tuple, Union
import numpy as np
def ffmpeg_read(bpayload: bytes, sampling_rate: int) -> np.array:
    """Helper function to read an audio file through ffmpeg."""
    ar = f"{sampling_rate}"
    ac = "1"
    format_for_conversion = "f32le"
    ffmpeg_command = [
        "ffmpeg",
        "-i",
        "pipe:0",
        "-ac",
        ac,
        "-ar",
        ar,
        "-f",
        format_for_conversion,
        "-hide_banner",
        "-loglevel",
        "quiet",
        "pipe:1",
    ]
    try:
        with subprocess.Popen(ffmpeg_command, stdin=subprocess.PIPE, stdout=subprocess.PIPE) as ffmpeg_process:
            output_stream = ffmpeg_process.communicate(bpayload)
    except FileNotFoundError as error:
        raise ValueError("ffmpeg was not found but is required to load audio files from filename") from error
    out_bytes = output_stream[0]
    audio = np.frombuffer(out_bytes, np.float32)
    if audio.shape[0] == 0:
        raise ValueError("Malformed soundfile")
    return audio


def ffmpeg_microphone(sampling_rate: int, chunk_length_s: float, format_for_conversion: str = "f32le"):
    """Helper function to stream raw microphone data through ffmpeg."""
    ar = f"{sampling_rate}"
    ac = "1"
    if format_for_conversion == "s16le":
        size_of_sample = 2
    elif format_for_conversion == "f32le":
        size_of_sample = 4
    else:
        raise ValueError(f"Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`")

    system = platform.system()
    if system == "Linux":
        format_ = "alsa"
        input_ = "default"
    elif system == "Darwin":
        format_ = "avfoundation"
        input_ = ":0"
    elif system == "Windows":
        format_ = "dshow"
        input_ = "default"

    ffmpeg_command = [
        "ffmpeg",
        "-f",
        format_,
        "-i",
        input_,
        "-ac",
        ac,
        "-ar",
        ar,
        "-f",
        format_for_conversion,
        "-fflags",
        "nobuffer",
        "-hide_banner",
        "-loglevel",
        "quiet",
        "pipe:1",
    ]
    chunk_len = int(round(sampling_rate * chunk_length_s)) * size_of_sample
    iterator = _ffmpeg_stream(ffmpeg_command, chunk_len)
    for item in iterator:
        yield item


def ffmpeg_microphone_live(
    sampling_rate: int,
    chunk_length_s: float,
    stream_chunk_s: Optional[int] = None,
    stride_length_s: Optional[Union[Tuple[float, float], float]] = None,
    format_for_conversion: str = "f32le",
):
    """Reads microphone audio as overlapping chunks suitable for streaming inference."""
    if stream_chunk_s is not None:
        chunk_s = stream_chunk_s
    else:
        chunk_s = chunk_length_s

    microphone = ffmpeg_microphone(sampling_rate, chunk_s, format_for_conversion=format_for_conversion)
    if format_for_conversion == "s16le":
        dtype = np.int16
        size_of_sample = 2
    elif format_for_conversion == "f32le":
        dtype = np.float32
        size_of_sample = 4
    else:
        raise ValueError(f"Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`")

    if stride_length_s is None:
        stride_length_s = chunk_length_s / 6
    chunk_len = int(round(sampling_rate * chunk_length_s)) * size_of_sample
    if isinstance(stride_length_s, (int, float)):
        stride_length_s = [stride_length_s, stride_length_s]
    stride_left = int(round(sampling_rate * stride_length_s[0])) * size_of_sample
    stride_right = int(round(sampling_rate * stride_length_s[1])) * size_of_sample
    audio_time = datetime.datetime.now()
    delta = datetime.timedelta(seconds=chunk_s)
    for item in chunk_bytes_iter(microphone, chunk_len, stride=(stride_left, stride_right), stream=True):
        # Put everything back in numpy scale
        item["raw"] = np.frombuffer(item["raw"], dtype=dtype)
        item["stride"] = (
            item["stride"][0] // size_of_sample,
            item["stride"][1] // size_of_sample,
        )
        item["sampling_rate"] = sampling_rate
        audio_time += delta
        if datetime.datetime.now() > audio_time + 10 * delta:
            # We're late !! SKIP
            continue
        yield item


def chunk_bytes_iter(iterator, chunk_len: int, stride: Tuple[int, int], stream: bool = False):
    """Reads raw bytes from an iterator and yields chunks of length `chunk_len` with the requested stride overlap."""
    acc = b""
    stride_left, stride_right = stride
    if stride_left + stride_right >= chunk_len:
        raise ValueError(
            f"Stride needs to be strictly smaller than chunk_len: ({stride_left}, {stride_right}) vs {chunk_len}"
        )
    _stride_left = 0
    for raw in iterator:
        acc += raw
        if stream and len(acc) < chunk_len:
            stride = (_stride_left, 0)
            yield {"raw": acc[:chunk_len], "stride": stride, "partial": True}
        else:
            while len(acc) >= chunk_len:
                # We are flushing the accumulator
                stride = (_stride_left, stride_right)
                item = {"raw": acc[:chunk_len], "stride": stride}
                if stream:
                    item["partial"] = False
                yield item
                _stride_left = stride_left
                acc = acc[chunk_len - stride_left - stride_right :]
    # Last chunk
    if len(acc) > stride_left:
        item = {"raw": acc, "stride": (_stride_left, 0)}
        if stream:
            item["partial"] = False
        yield item


def _ffmpeg_stream(ffmpeg_command, buflen: int):
    """Internal function to create the generator of data through ffmpeg"""
    bufsize = 2**24  # 16Mo
    try:
        with subprocess.Popen(ffmpeg_command, stdout=subprocess.PIPE, bufsize=bufsize) as ffmpeg_process:
            while True:
                raw = ffmpeg_process.stdout.read(buflen)
                if raw == b"":
                    break
                yield raw
    except FileNotFoundError as error:
        raise ValueError("ffmpeg was not found but is required to stream audio files from filename") from error
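

# Illustrative usage sketch (not part of the original module); the file path
# and sampling rate are assumptions:
#
# with open("sample.wav", "rb") as f:
#     audio = ffmpeg_read(f.read(), sampling_rate=16000)  # 1-D float32 array
#
# for item in ffmpeg_microphone_live(sampling_rate=16000, chunk_length_s=5.0, stream_chunk_s=1.0):
#     chunk = item["raw"]           # np.float32 samples, including stride overlap
#     left, right = item["stride"]  # overlap (in samples) to trim from each side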
| 486
|
import argparse
from copy import deepcopy
import numpy as np
from datasets import ClassLabel, DatasetDict, load_dataset
from evaluate import load
from transformers import (
AutoModelForSequenceClassification,
AutoTokenizer,
DataCollatorWithPadding,
Trainer,
TrainerCallback,
TrainingArguments,
set_seed,
)
def get_args():
    parser = argparse.ArgumentParser()
    parser.add_argument("--model_ckpt", type=str, default="microsoft/unixcoder-base-nine")
    parser.add_argument("--num_epochs", type=int, default=5)
    parser.add_argument("--batch_size", type=int, default=6)
    parser.add_argument("--gradient_accumulation_steps", type=int, default=1)
    parser.add_argument("--freeze", type=bool, default=True)
    parser.add_argument("--learning_rate", type=float, default=5e-4)
    parser.add_argument("--seed", type=int, default=0)
    parser.add_argument("--lr_scheduler_type", type=str, default="cosine")
    parser.add_argument("--num_warmup_steps", type=int, default=10)
    parser.add_argument("--weight_decay", type=float, default=0.01)
    parser.add_argument("--output_dir", type=str, default="./results")
    return parser.parse_args()


metric = load("accuracy")


def compute_metrics(eval_pred):
    predictions, labels = eval_pred
    predictions = np.argmax(predictions, axis=1)
    return metric.compute(predictions=predictions, references=labels)


class CustomCallback(TrainerCallback):
    def __init__(self, trainer):
        super().__init__()
        self._trainer = trainer

    def on_epoch_end(self, args, state, control, **kwargs):
        if control.should_evaluate:
            control_copy = deepcopy(control)
            self._trainer.evaluate(eval_dataset=self._trainer.train_dataset, metric_key_prefix="train")
            return control_copy


def main():
    args = get_args()
    set_seed(args.seed)

    dataset = load_dataset("codeparrot/codecomplex", split="train")
    train_test = dataset.train_test_split(test_size=0.2)
    test_validation = train_test["test"].train_test_split(test_size=0.5)
    train_test_validation = DatasetDict(
        {
            "train": train_test["train"],
            "test": test_validation["train"],
            "valid": test_validation["test"],
        }
    )

    print("Loading tokenizer and model")
    tokenizer = AutoTokenizer.from_pretrained(args.model_ckpt)
    tokenizer.pad_token = tokenizer.eos_token
    model = AutoModelForSequenceClassification.from_pretrained(args.model_ckpt, num_labels=7)
    model.config.pad_token_id = model.config.eos_token_id

    if args.freeze:
        for param in model.roberta.parameters():
            param.requires_grad = False

    labels = ClassLabel(num_classes=7, names=list(set(train_test_validation["train"]["complexity"])))

    def tokenize(example):
        inputs = tokenizer(example["src"], truncation=True, max_length=1024)
        label = labels.str2int(example["complexity"])
        return {
            "input_ids": inputs["input_ids"],
            "attention_mask": inputs["attention_mask"],
            "label": label,
        }

    tokenized_datasets = train_test_validation.map(
        tokenize,
        batched=True,
        remove_columns=train_test_validation["train"].column_names,
    )
    data_collator = DataCollatorWithPadding(tokenizer=tokenizer)

    training_args = TrainingArguments(output_dir=args.output_dir, learning_rate=args.learning_rate, lr_scheduler_type=args.lr_scheduler_type, evaluation_strategy="epoch", save_strategy="epoch", logging_strategy="epoch", per_device_train_batch_size=args.batch_size, per_device_eval_batch_size=args.batch_size, num_train_epochs=args.num_epochs, gradient_accumulation_steps=args.gradient_accumulation_steps, weight_decay=0.01, metric_for_best_model="accuracy", run_name="complexity-java", report_to="wandb")

    trainer = Trainer(model=model, args=training_args, train_dataset=tokenized_datasets["train"], eval_dataset=tokenized_datasets["valid"], tokenizer=tokenizer, data_collator=data_collator, compute_metrics=compute_metrics)

    print("Training...")
    trainer.add_callback(CustomCallback(trainer))
    trainer.train()
if __name__ == "__main__":
main()
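

# Illustrative invocation sketch (not part of the original script; the file
# name is assumed). The flags mirror the argparse defaults in get_args above:
#
#   python train_complexity_predictor.py --model_ckpt microsoft/unixcoder-base-nine \
#       --num_epochs 5 --batch_size 6 --learning_rate 5e-4 --output_dir ./results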
| 486
| 1
|
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_big_bird import BigBirdTokenizer
else:
    BigBirdTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "google/bigbird-roberta-base": "https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model",
        "google/bigbird-roberta-large": (
            "https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model"
        ),
        "google/bigbird-base-trivia-itc": (
            "https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model"
        ),
    },
    "tokenizer_file": {
        "google/bigbird-roberta-base": (
            "https://huggingface.co/google/bigbird-roberta-base/resolve/main/tokenizer.json"
        ),
        "google/bigbird-roberta-large": (
            "https://huggingface.co/google/bigbird-roberta-large/resolve/main/tokenizer.json"
        ),
        "google/bigbird-base-trivia-itc": (
            "https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/tokenizer.json"
        ),
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/bigbird-roberta-base": 4096,
    "google/bigbird-roberta-large": 4096,
    "google/bigbird-base-trivia-itc": 4096,
}

SPIECE_UNDERLINE = "▁"
class BigBirdTokenizerFast(PreTrainedTokenizerFast):
    """simple docstring"""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = BigBirdTokenizer
    model_input_names = ["input_ids", "attention_mask"]
    prefix_tokens = []

    def __init__(self, vocab_file=None, tokenizer_file=None, unk_token="<unk>", bos_token="<s>", eos_token="</s>", pad_token="<pad>", sep_token="[SEP]", mask_token="[MASK]", cls_token="[CLS]", **kwargs):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token

        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(vocab_file, tokenizer_file=tokenizer_file, bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, **kwargs)

        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    "You should not supply a second sequence if the provided sequence of "
                    "ids is already formatted with special tokens for the model."
                )
            return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_0]

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )

        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
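

# Illustrative usage sketch (not part of the original module); the checkpoint
# name comes from the pretrained maps above:
#
# tokenizer = BigBirdTokenizerFast.from_pretrained("google/bigbird-roberta-base")
# encoding = tokenizer("Hello world")  # [CLS] ... [SEP] ids added automatically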
| 179
|
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, Encoder, VectorQuantizer
@dataclass
class VQEncoderOutput(BaseOutput):
    latents: torch.FloatTensor


class VQModel(ModelMixin, ConfigMixin):
    @register_to_config
    def __init__(self, in_channels: int = 3, out_channels: int = 3, down_block_types: Tuple[str] = ("DownEncoderBlock2D",), up_block_types: Tuple[str] = ("UpDecoderBlock2D",), block_out_channels: Tuple[int] = (64,), layers_per_block: int = 1, act_fn: str = "silu", latent_channels: int = 3, sample_size: int = 32, num_vq_embeddings: int = 256, norm_num_groups: int = 32, vq_embed_dim: Optional[int] = None, scaling_factor: float = 0.18215, norm_type: str = "group") -> None:
        super().__init__()
        # pass init params to Encoder
        self.encoder = Encoder(in_channels=in_channels, out_channels=latent_channels, down_block_types=down_block_types, block_out_channels=block_out_channels, layers_per_block=layers_per_block, act_fn=act_fn, norm_num_groups=norm_num_groups, double_z=False)

        vq_embed_dim = vq_embed_dim if vq_embed_dim is not None else latent_channels

        self.quant_conv = nn.Conv2d(latent_channels, vq_embed_dim, 1)
        self.quantize = VectorQuantizer(num_vq_embeddings, vq_embed_dim, beta=0.25, remap=None, sane_index_shape=False)
        self.post_quant_conv = nn.Conv2d(vq_embed_dim, latent_channels, 1)

        # pass init params to Decoder
        self.decoder = Decoder(in_channels=latent_channels, out_channels=out_channels, up_block_types=up_block_types, block_out_channels=block_out_channels, layers_per_block=layers_per_block, act_fn=act_fn, norm_num_groups=norm_num_groups, norm_type=norm_type)

    @apply_forward_hook
    def encode(self, x: torch.FloatTensor, return_dict: bool = True) -> VQEncoderOutput:
        h = self.encoder(x)
        h = self.quant_conv(h)
        if not return_dict:
            return (h,)
        return VQEncoderOutput(latents=h)

    @apply_forward_hook
    def decode(self, h: torch.FloatTensor, force_not_quantize: bool = False, return_dict: bool = True) -> Union[DecoderOutput, torch.FloatTensor]:
        # also go through the quantization layer
        if not force_not_quantize:
            quant, emb_loss, info = self.quantize(h)
        else:
            quant = h
        quant2 = self.post_quant_conv(quant)
        dec = self.decoder(quant2, quant if self.config.norm_type == "spatial" else None)
        if not return_dict:
            return (dec,)
        return DecoderOutput(sample=dec)

    def forward(self, sample: torch.FloatTensor, return_dict: bool = True) -> Union[DecoderOutput, torch.FloatTensor]:
        x = sample
        h = self.encode(x).latents
        dec = self.decode(h).sample
        if not return_dict:
            return (dec,)
        return DecoderOutput(sample=dec)
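

# Illustrative usage sketch (not part of the original module): round-trip a
# batch of images through encode -> quantize -> decode (shapes assumed).
#
# model = VQModel(block_out_channels=(64,), norm_num_groups=32)
# x = torch.randn(1, 3, 32, 32)
# reconstruction = model(x).sample  # same shape as x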
| 6
| 0
|
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate
# and perform gradient accumulation
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32


def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples, padding="longest", max_length=max_length, pad_to_multiple_of=pad_to_multiple_of, return_tensors="pt"
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )

    return train_dataloader, eval_dataloader
# For testing only
if os.environ.get('''TESTING_MOCKED_DATALOADERS''', None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
    get_dataloaders = mocked_dataloaders  # noqa: F811
def training_function(config, args):
    # For testing only
    if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
        config["num_epochs"] = 2
    # New Code #
    gradient_accumulation_steps = int(args.gradient_accumulation_steps)
    # Initialize accelerator
    accelerator = Accelerator(
        cpu=args.cpu, mixed_precision=args.mixed_precision, gradient_accumulation_steps=gradient_accumulation_steps
    )
    if accelerator.distributed_type == DistributedType.TPU and gradient_accumulation_steps > 1:
        raise NotImplementedError(
            "Gradient accumulation on TPUs is currently not supported. Pass `gradient_accumulation_steps=1`"
        )
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])

    metric = evaluate.load("glue", "mrpc")

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)

    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr)

    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer, num_warmup_steps=100, num_training_steps=(len(train_dataloader) * num_epochs)
    )

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # Now we train the model
    for epoch in range(num_epochs):
        model.train()
        for step, batch in enumerate(train_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            # New code #
            # We use the new `accumulate` context manager to perform gradient accumulation
            # We also currently do not support TPUs nor advise it as bugs were found on the XLA side when running our tests.
            with accelerator.accumulate(model):
                output = model(**batch)
                loss = output.loss
                accelerator.backward(loss)
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

        model.eval()
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
            metric.add_batch(predictions=predictions, references=references)

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    # New Code #
    parser.add_argument(
        "--gradient_accumulation_steps",
        type=int,
        default=1,
        help="The number of minibatches to be ran before gradients are accumulated.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
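

# Illustrative invocation sketch (not part of the original example; the file
# name is assumed). The flags are the argparse options defined in main above:
#
#   accelerate launch gradient_accumulation.py --gradient_accumulation_steps 4 --mixed_precision fp16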
| 472
|
from importlib import import_module
from .logging import get_logger
logger = get_logger(__name__)


class _PatchedModuleObj:
    """Patched module so that it works with isinstance() checks."""

    def __init__(self, module, attrs=None):
        attrs = attrs or []
        if module is not None:
            for key in module.__dict__:
                if key in attrs or not key.startswith("__"):
                    setattr(self, key, getattr(module, key))
        self._original_module = module._original_module if isinstance(module, _PatchedModuleObj) else module


class patch_submodule:
    """Patch a submodule attribute of an object, keeping all other submodule attributes intact at all levels."""

    _active_patches = []

    def __init__(self, obj, target: str, new, attrs=None):
        self.obj = obj
        self.target = target
        self.new = new
        self.key = target.split(".")[0]
        self.original = {}
        self.attrs = attrs or []

    def __enter__(self):
        *submodules, target_attr = self.target.split(".")

        # Patch modules:
        # it's used to patch attributes of submodules like "os.path.join";
        # in this case we need to patch "os" and "os.path"

        for i in range(len(submodules)):
            try:
                submodule = import_module(".".join(submodules[: i + 1]))
            except ModuleNotFoundError:
                continue
            # We iterate over all the globals in self.obj in case we find "os" or "os.path"
            for attr in self.obj.__dir__():
                obj_attr = getattr(self.obj, attr)
                # We don't check for the name of the global, but rather if its value *is* "os" or "os.path".
                # This allows to patch renamed modules like "from os import path as ospath".
                if obj_attr is submodule or (
                    isinstance(obj_attr, _PatchedModuleObj) and obj_attr._original_module is submodule
                ):
                    self.original[attr] = obj_attr
                    # patch at top level
                    setattr(self.obj, attr, _PatchedModuleObj(obj_attr, attrs=self.attrs))
                    patched = getattr(self.obj, attr)
                    # construct lower levels patches
                    for key in submodules[i + 1 :]:
                        setattr(patched, key, _PatchedModuleObj(getattr(patched, key, None), attrs=self.attrs))
                        patched = getattr(patched, key)
                    # finally set the target attribute
                    setattr(patched, target_attr, self.new)

        # Patch attribute itself:
        # it's used for builtins like "open",
        # and also to patch "os.path.join" we may also need to patch "join"
        # itself if it was imported as "from os.path import join".
        if submodules:  # if it's an attribute of a submodule like "os.path.join"
            try:
                attr_value = getattr(import_module(".".join(submodules)), target_attr)
            except (AttributeError, ModuleNotFoundError):
                return
            # We iterate over all the globals in self.obj in case we find "os.path.join"
            for attr in self.obj.__dir__():
                # We don't check for the name of the global, but rather if its value *is* "os.path.join".
                # This allows to patch renamed attributes like "from os.path import join as pjoin".
                if getattr(self.obj, attr) is attr_value:
                    self.original[attr] = getattr(self.obj, attr)
                    setattr(self.obj, attr, self.new)
        elif target_attr in globals()["__builtins__"]:  # if it's a builtin like "open"
            self.original[target_attr] = globals()["__builtins__"][target_attr]
            setattr(self.obj, target_attr, self.new)
        else:
            raise RuntimeError(f"Tried to patch attribute {target_attr} instead of a submodule.")

    def __exit__(self, *exc_info):
        for attr in list(self.original):
            setattr(self.obj, attr, self.original.pop(attr))

    def start(self):
        """Activate a patch."""
        self.__enter__()
        self._active_patches.append(self)

    def stop(self):
        """Stop an active patch."""
        try:
            self._active_patches.remove(self)
        except ValueError:
            # If the patch hasn't been started this will fail
            return None
        return self.__exit__()
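

# Illustrative usage sketch (not part of the original module): patch an
# attribute reachable through a submodule chain on a holder object (the
# holder class below is made up for the demo).
#
# import os
# class _Holder:
#     os = os
#
# with patch_submodule(_Holder, "os.path.join", lambda *parts: "/patched"):
#     assert _Holder.os.path.join("a", "b") == "/patched"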
| 472
| 1
|
"""simple docstring"""
import json
import os
import shutil
import warnings
from argparse import ArgumentParser, Namespace
from pathlib import Path
from typing import List
from ..utils import logging
from . import BaseTransformersCLICommand
try:
    from cookiecutter.main import cookiecutter

    _has_cookiecutter = True
except ImportError:
    _has_cookiecutter = False

logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
def add_new_model_command_factory(args: Namespace):
    return AddNewModelCommand(args.testing, args.testing_file, path=args.path)


class AddNewModelCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        add_new_model_parser = parser.add_parser("add-new-model")
        add_new_model_parser.add_argument("--testing", action="store_true", help="If in testing mode.")
        add_new_model_parser.add_argument("--testing_file", type=str, help="Configuration file on which to run.")
        add_new_model_parser.add_argument(
            "--path", type=str, help="Path to cookiecutter. Should only be used for testing purposes."
        )
        add_new_model_parser.set_defaults(func=add_new_model_command_factory)

    def __init__(self, testing: bool, testing_file: str, path=None, *args):
        self._testing = testing
        self._testing_file = testing_file
        self._path = path

    def run(self):
        warnings.warn(
            "The command `transformers-cli add-new-model` is deprecated and will be removed in v5 of Transformers. "
            "It is not actively maintained anymore, so might give a result that won't pass all tests and quality "
            "checks, you should use `transformers-cli add-new-model-like` instead."
        )
        if not _has_cookiecutter:
            raise ImportError(
                "Model creation dependencies are required to use the `add_new_model` command. Install them by running "
                "the following at the root of your `transformers` clone:\n\n\t$ pip install -e .[modelcreation]\n"
            )
        # Ensure that there is no other `cookiecutter-template-xxx` directory in the current working directory
        directories = [directory for directory in os.listdir() if "cookiecutter-template-" == directory[:22]]
        if len(directories) > 0:
            raise ValueError(
                "Several directories starting with `cookiecutter-template-` in current working directory. "
                "Please clean your directory by removing all folders starting with `cookiecutter-template-` or "
                "change your working directory."
            )

        path_to_transformer_root = (
            Path(__file__).parent.parent.parent.parent if self._path is None else Path(self._path).parent.parent
        )
        path_to_cookiecutter = path_to_transformer_root / "templates" / "adding_a_new_model"

        # Execute cookiecutter
        if not self._testing:
            cookiecutter(str(path_to_cookiecutter))
        else:
            with open(self._testing_file, "r") as configuration_file:
                testing_configuration = json.load(configuration_file)

            cookiecutter(
                str(path_to_cookiecutter if self._path is None else self._path),
                no_input=True,
                extra_context=testing_configuration,
            )

        directory = [directory for directory in os.listdir() if "cookiecutter-template-" in directory[:22]][0]

        # Retrieve configuration
        with open(directory + "/configuration.json", "r") as configuration_file:
            configuration = json.load(configuration_file)

        lowercase_model_name = configuration["lowercase_modelname"]
        generate_tensorflow_pytorch_and_flax = configuration["generate_tensorflow_pytorch_and_flax"]
        os.remove(f"{directory}/configuration.json")

        output_pytorch = "PyTorch" in generate_tensorflow_pytorch_and_flax
        output_tensorflow = "TensorFlow" in generate_tensorflow_pytorch_and_flax
        output_flax = "Flax" in generate_tensorflow_pytorch_and_flax

        model_dir = f"{path_to_transformer_root}/src/transformers/models/{lowercase_model_name}"
        os.makedirs(model_dir, exist_ok=True)
        os.makedirs(f"{path_to_transformer_root}/tests/models/{lowercase_model_name}", exist_ok=True)

        # Tests require submodules as they have parent imports
        with open(f"{path_to_transformer_root}/tests/models/{lowercase_model_name}/__init__.py", "w"):
            pass

        shutil.move(
            f"{directory}/__init__.py",
            f"{model_dir}/__init__.py",
        )
        shutil.move(
            f"{directory}/configuration_{lowercase_model_name}.py",
            f"{model_dir}/configuration_{lowercase_model_name}.py",
        )

        def remove_copy_lines(path):
            with open(path, "r") as f:
                lines = f.readlines()
            with open(path, "w") as f:
                for line in lines:
                    if "# Copied from transformers." not in line:
                        f.write(line)

        if output_pytorch:
            if not self._testing:
                remove_copy_lines(f"{directory}/modeling_{lowercase_model_name}.py")
            shutil.move(
                f"{directory}/modeling_{lowercase_model_name}.py",
                f"{model_dir}/modeling_{lowercase_model_name}.py",
            )
            shutil.move(
                f"{directory}/test_modeling_{lowercase_model_name}.py",
                f"{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_{lowercase_model_name}.py",
            )
        else:
            os.remove(f"{directory}/modeling_{lowercase_model_name}.py")
            os.remove(f"{directory}/test_modeling_{lowercase_model_name}.py")

        if output_tensorflow:
            if not self._testing:
                remove_copy_lines(f"{directory}/modeling_tf_{lowercase_model_name}.py")
            shutil.move(
                f"{directory}/modeling_tf_{lowercase_model_name}.py",
                f"{model_dir}/modeling_tf_{lowercase_model_name}.py",
            )
            shutil.move(
                f"{directory}/test_modeling_tf_{lowercase_model_name}.py",
                f"{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_tf_{lowercase_model_name}.py",
            )
        else:
            os.remove(f"{directory}/modeling_tf_{lowercase_model_name}.py")
            os.remove(f"{directory}/test_modeling_tf_{lowercase_model_name}.py")

        if output_flax:
            if not self._testing:
                remove_copy_lines(f"{directory}/modeling_flax_{lowercase_model_name}.py")
            shutil.move(
                f"{directory}/modeling_flax_{lowercase_model_name}.py",
                f"{model_dir}/modeling_flax_{lowercase_model_name}.py",
            )
            shutil.move(
                f"{directory}/test_modeling_flax_{lowercase_model_name}.py",
                f"{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_flax_{lowercase_model_name}.py",
            )
        else:
            os.remove(f"{directory}/modeling_flax_{lowercase_model_name}.py")
            os.remove(f"{directory}/test_modeling_flax_{lowercase_model_name}.py")

        shutil.move(
            f"{directory}/{lowercase_model_name}.md",
            f"{path_to_transformer_root}/docs/source/en/model_doc/{lowercase_model_name}.md",
        )
        shutil.move(
            f"{directory}/tokenization_{lowercase_model_name}.py",
            f"{model_dir}/tokenization_{lowercase_model_name}.py",
        )
        shutil.move(
            f"{directory}/tokenization_fast_{lowercase_model_name}.py",
            f"{model_dir}/tokenization_{lowercase_model_name}_fast.py",
        )

        from os import fdopen, remove
        from shutil import copymode, move
        from tempfile import mkstemp

        def replace(original_file: str, line_to_copy_below: str, lines_to_copy: List[str]):
            # Create temp file
            fh, abs_path = mkstemp()
            line_found = False
            with fdopen(fh, "w") as new_file:
                with open(original_file) as old_file:
                    for line in old_file:
                        new_file.write(line)
                        if line_to_copy_below in line:
                            line_found = True
                            for line_to_copy in lines_to_copy:
                                new_file.write(line_to_copy)

            if not line_found:
                raise ValueError(f"Line {line_to_copy_below} was not found in file.")

            # Copy the file permissions from the old file to the new file
            copymode(original_file, abs_path)
            # Remove original file
            remove(original_file)
            # Move new file
            move(abs_path, original_file)

        def skip_units(line):
            return (
                ("generating PyTorch" in line and not output_pytorch)
                or ("generating TensorFlow" in line and not output_tensorflow)
                or ("generating Flax" in line and not output_flax)
            )

        def replace_in_files(path_to_datafile):
            with open(path_to_datafile) as datafile:
                lines_to_copy = []
                skip_file = False
                skip_snippet = False
                for line in datafile:
                    if "# To replace in: " in line and "##" not in line:
                        file_to_replace_in = line.split('"')[1]
                        skip_file = skip_units(line)
                    elif "# Below: " in line and "##" not in line:
                        line_to_copy_below = line.split('"')[1]
                        skip_snippet = skip_units(line)
                    elif "# End." in line and "##" not in line:
                        if not skip_file and not skip_snippet:
                            replace(file_to_replace_in, line_to_copy_below, lines_to_copy)

                        lines_to_copy = []
                    elif "# Replace with" in line and "##" not in line:
                        lines_to_copy = []
                    elif "##" not in line:
                        lines_to_copy.append(line)

            remove(path_to_datafile)

        replace_in_files(f"{directory}/to_replace_{lowercase_model_name}.py")
        os.rmdir(directory)
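

# Illustrative invocation sketch (not part of the original command module):
#
#   transformers-cli add-new-model                                  # interactive cookiecutter flow
#   transformers-cli add-new-model --testing --testing_file config.json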
| 661
|
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_distilbert import DistilBertTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''}

PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''distilbert-base-uncased''': '''https://huggingface.co/distilbert-base-uncased/resolve/main/vocab.txt''',
'''distilbert-base-uncased-distilled-squad''': (
'''https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/vocab.txt'''
),
'''distilbert-base-cased''': '''https://huggingface.co/distilbert-base-cased/resolve/main/vocab.txt''',
'''distilbert-base-cased-distilled-squad''': (
'''https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/vocab.txt'''
),
'''distilbert-base-german-cased''': '''https://huggingface.co/distilbert-base-german-cased/resolve/main/vocab.txt''',
'''distilbert-base-multilingual-cased''': (
'''https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''distilbert-base-uncased''': '''https://huggingface.co/distilbert-base-uncased/resolve/main/tokenizer.json''',
'''distilbert-base-uncased-distilled-squad''': (
'''https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/tokenizer.json'''
),
'''distilbert-base-cased''': '''https://huggingface.co/distilbert-base-cased/resolve/main/tokenizer.json''',
'''distilbert-base-cased-distilled-squad''': (
'''https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/tokenizer.json'''
),
'''distilbert-base-german-cased''': (
'''https://huggingface.co/distilbert-base-german-cased/resolve/main/tokenizer.json'''
),
'''distilbert-base-multilingual-cased''': (
'''https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/tokenizer.json'''
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''distilbert-base-uncased''': 5_1_2,
'''distilbert-base-uncased-distilled-squad''': 5_1_2,
'''distilbert-base-cased''': 5_1_2,
'''distilbert-base-cased-distilled-squad''': 5_1_2,
'''distilbert-base-german-cased''': 5_1_2,
'''distilbert-base-multilingual-cased''': 5_1_2,
}
PRETRAINED_INIT_CONFIGURATION = {
'''distilbert-base-uncased''': {'''do_lower_case''': True},
'''distilbert-base-uncased-distilled-squad''': {'''do_lower_case''': True},
'''distilbert-base-cased''': {'''do_lower_case''': False},
'''distilbert-base-cased-distilled-squad''': {'''do_lower_case''': False},
'''distilbert-base-german-cased''': {'''do_lower_case''': False},
'''distilbert-base-multilingual-cased''': {'''do_lower_case''': False},
}
class DistilBertTokenizerFast(PreTrainedTokenizerFast):
    '''simple docstring'''

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = DistilBertTokenizer

    def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=True, unk_token="[UNK]", sep_token="[SEP]", pad_token="[PAD]", cls_token="[CLS]", mask_token="[MASK]", tokenize_chinese_chars=True, strip_accents=None, **kwargs):
        super().__init__(vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents, **kwargs)

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]

        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]

        return output

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory, filename_prefix=None):
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
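

# Illustrative usage sketch (not part of the original module); the checkpoint
# name comes from the pretrained maps above:
#
# tokenizer = DistilBertTokenizerFast.from_pretrained("distilbert-base-uncased")
# ids = tokenizer("hello world").input_ids  # [CLS] ... [SEP] ids added automatically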
| 661
| 1
|
'''simple docstring'''
__version__ = '2.13.1'
import platform
import pyarrow
from packaging import version
if version.parse(platform.python_version()) < version.parse('3.7'):
raise ImportWarning(
'To use `datasets`, Python>=3.7 is required, and the current version of Python doesn\'t match this condition.'
)
if version.parse(pyarrow.__version__).major < 8:
raise ImportWarning(
'To use `datasets`, the module `pyarrow>=8.0.0` is required, and the current version of `pyarrow` doesn\'t match this condition.\n'
'If you are running this in a Google Colab, you should probably just restart the runtime to use the right version of `pyarrow`.'
)
del platform
del pyarrow
del version
from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled
from .info import DatasetInfo, MetricInfo
from .inspect import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
list_datasets,
list_metrics,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric
from .metric import Metric
from .splits import (
NamedSplit,
NamedSplitAll,
Split,
SplitBase,
SplitDict,
SplitGenerator,
SplitInfo,
SubSplitInfo,
percent,
)
from .tasks import *
from .utils import *
from .utils import logging
# deprecated modules
from datasets import arrow_dataset as _arrow_dataset # isort:skip
from datasets import utils as _utils # isort:skip
from datasets.utils import download_manager as _deprecated_download_manager # isort:skip
_arrow_dataset.concatenate_datasets = concatenate_datasets
_utils.DownloadConfig = DownloadConfig
_utils.DownloadManager = DownloadManager
_utils.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadConfig = DownloadConfig
_deprecated_download_manager.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadManager = DownloadManager
del _arrow_dataset, _utils, _deprecated_download_manager
| 715
|
'''simple docstring'''
import os
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_doctest_list.py
REPO_PATH = '.'

if __name__ == "__main__":
    doctest_file_path = os.path.join(REPO_PATH, 'utils/documentation_tests.txt')
    non_existent_paths = []
    all_paths = []
    with open(doctest_file_path) as fp:
        for line in fp:
            line = line.strip()
            path = os.path.join(REPO_PATH, line)
            if not (os.path.isfile(path) or os.path.isdir(path)):
                non_existent_paths.append(line)
            all_paths.append(path)

    if len(non_existent_paths) > 0:
        non_existent_paths = '\n'.join(non_existent_paths)
        raise ValueError(f'`utils/documentation_tests.txt` contains non-existent paths:\n{non_existent_paths}')
    if all_paths != sorted(all_paths):
        raise ValueError('Files in `utils/documentation_tests.txt` are not in alphabetical order.')
| 464
| 0
|
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import FunnelConfig, is_tf_available
from transformers.testing_utils import require_tf
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFFunnelBaseModel,
TFFunnelForMaskedLM,
TFFunnelForMultipleChoice,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForSequenceClassification,
TFFunnelForTokenClassification,
TFFunnelModel,
)
class UpperCamelCase :
def __init__( self :List[Any] , __magic_name__ :List[str] , __magic_name__ :Optional[Any]=13 , __magic_name__ :List[str]=7 , __magic_name__ :int=True , __magic_name__ :Tuple=True , __magic_name__ :Dict=True , __magic_name__ :Optional[int]=True , __magic_name__ :Tuple=99 , __magic_name__ :int=[1, 1, 2] , __magic_name__ :Optional[int]=1 , __magic_name__ :str=32 , __magic_name__ :Tuple=4 , __magic_name__ :Optional[int]=8 , __magic_name__ :Union[str, Any]=37 , __magic_name__ :Dict="gelu_new" , __magic_name__ :Optional[Any]=0.1 , __magic_name__ :Optional[int]=0.1 , __magic_name__ :Union[str, Any]=0.0 , __magic_name__ :str=512 , __magic_name__ :Tuple=3 , __magic_name__ :Union[str, Any]=0.02 , __magic_name__ :Tuple=3 , __magic_name__ :Union[str, Any]=4 , __magic_name__ :List[Any]=None , __magic_name__ :List[str]=False , ) ->int:
lowercase : List[Any] = parent
lowercase : Union[str, Any] = batch_size
lowercase : List[str] = seq_length
lowercase : Any = is_training
lowercase : str = use_input_mask
lowercase : Any = use_token_type_ids
lowercase : Optional[int] = use_labels
lowercase : int = vocab_size
lowercase : Union[str, Any] = block_sizes
lowercase : Dict = num_decoder_layers
lowercase : Union[str, Any] = d_model
lowercase : Dict = n_head
lowercase : Dict = d_head
lowercase : Union[str, Any] = d_inner
lowercase : Optional[Any] = hidden_act
lowercase : List[Any] = hidden_dropout
lowercase : Union[str, Any] = attention_dropout
lowercase : Union[str, Any] = activation_dropout
lowercase : Optional[int] = max_position_embeddings
lowercase : Tuple = type_vocab_size
lowercase : Dict = 2
lowercase : Tuple = num_labels
lowercase : int = num_choices
lowercase : Dict = scope
lowercase : Union[str, Any] = initializer_std
# Used in the tests to check the size of the first attention layer
lowercase : List[str] = n_head
# Used in the tests to check the size of the first hidden state
lowercase : int = self.d_model
# Used in the tests to check the number of output hidden states/attentions
lowercase : int = sum(self.block_sizes ) + (0 if base else self.num_decoder_layers)
# FunnelModel adds two hidden layers: input embeddings and the sum of the upsampled encoder hidden state with
# the last hidden state of the first block (which is the first hidden state of the decoder).
if not base:
lowercase : Dict = self.num_hidden_layers + 2

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = FunnelConfig(
            vocab_size=self.vocab_size,
            block_sizes=self.block_sizes,
            num_decoder_layers=self.num_decoder_layers,
            d_model=self.d_model,
            n_head=self.n_head,
            d_head=self.d_head,
            d_inner=self.d_inner,
            hidden_act=self.hidden_act,
            hidden_dropout=self.hidden_dropout,
            attention_dropout=self.attention_dropout,
            activation_dropout=self.activation_dropout,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_std=self.initializer_std,
        )

        return (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        )

    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFFunnelModel(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)

        inputs = [input_ids, input_mask]
        result = model(inputs)

        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.d_model))

        config.truncate_seq = False
        model = TFFunnelModel(config=config)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.d_model))

        config.separate_cls = False
        model = TFFunnelModel(config=config)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.d_model))

    def create_and_check_base_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFFunnelBaseModel(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)

        inputs = [input_ids, input_mask]
        result = model(inputs)

        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, 2, self.d_model))

        config.truncate_seq = False
        model = TFFunnelBaseModel(config=config)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, 3, self.d_model))

        config.separate_cls = False
        model = TFFunnelBaseModel(config=config)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, 2, self.d_model))

    def create_and_check_for_pretraining(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFFunnelForPreTraining(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFFunnelForMaskedLM(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFFunnelForSequenceClassification(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = TFFunnelForMultipleChoice(config=config)
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
        multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1))
        inputs = {
            "input_ids": multiple_choice_inputs_ids,
            "attention_mask": multiple_choice_input_mask,
            "token_type_ids": multiple_choice_token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def create_and_check_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFFunnelForTokenClassification(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFFunnelForQuestionAnswering(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class TFFunnelModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFFunnelModel,
            TFFunnelForMaskedLM,
            TFFunnelForPreTraining,
            TFFunnelForQuestionAnswering,
            TFFunnelForTokenClassification,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": (TFFunnelBaseModel, TFFunnelModel),
            "fill-mask": TFFunnelForMaskedLM,
            "question-answering": TFFunnelForQuestionAnswering,
            "text-classification": TFFunnelForSequenceClassification,
            "token-classification": TFFunnelForTokenClassification,
            "zero-shot": TFFunnelForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFFunnelModelTester(self)
        self.config_tester = ConfigTester(self, config_class=FunnelConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)
@require_tf
class TFFunnelBaseModelTest(TFModelTesterMixin, unittest.TestCase):
    all_model_classes = (
        (TFFunnelBaseModel, TFFunnelForMultipleChoice, TFFunnelForSequenceClassification) if is_tf_available() else ()
    )
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFFunnelModelTester(self, base=True)
        self.config_tester = ConfigTester(self, config_class=FunnelConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_base_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_base_model(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)
| 264
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
    'configuration_swiftformer': [
        'SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
        'SwiftFormerConfig',
        'SwiftFormerOnnxConfig',
    ]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_swiftformer'] = [
        'SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
        'SwiftFormerForImageClassification',
        'SwiftFormerModel',
        'SwiftFormerPreTrainedModel',
    ]
if TYPE_CHECKING:
from .configuration_swiftformer import (
SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
SwiftFormerConfig,
SwiftFormerOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_swiftformer import (
SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
SwiftFormerForImageClassification,
SwiftFormerModel,
SwiftFormerPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
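
# With the lazy module in place, a user-facing import such as
# `from transformers.models.swiftformer import SwiftFormerModel` (assuming this file is that
# package's `__init__.py`) does not pull in the torch-dependent `modeling_swiftformer` module
# at import time; `_LazyModule` defers the real import until the attribute is first accessed.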
| 20
| 0
|
"""simple docstring"""
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to properly calculate the metrics on the
# validation dataset when in a distributed system, and builds off the
# `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
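
# As a quick reference (assuming this script is saved as `multi_process_metrics.py`),
# a typical run looks like:
#
#   accelerate config                          # answer the configuration questionnaire once
#   accelerate launch multi_process_metrics.py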
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    """
    Creates a set of `DataLoader`s for the glue dataset, using "bert-base-cased" as the tokenizer.

    Args:
        accelerator (`Accelerator`):
            An `Accelerator` object
        batch_size (`int`, *optional*):
            The batch size for the train and validation DataLoaders.
    """
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )

    return train_dataloader, eval_dataloader
# For testing only
if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
    from accelerate.test_utils.training import mocked_dataloaders

    get_dataloaders = mocked_dataloaders  # noqa: F811
def training_function(config, args):
    # For testing only
    if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
        config["num_epochs"] = 2
    # Initialize accelerator
    accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])

    metric = evaluate.load("glue", "mrpc")

    # If the batch size is too big we use gradient accumulation
    gradient_accumulation_steps = 1
    if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
        gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
        batch_size = MAX_GPU_BATCH_SIZE
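    # e.g. a requested batch_size of 64 becomes 4 accumulation steps of 16 samples each,
    # so the optimizer still sees an effective batch of 64 per update (illustrative numbers).
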
    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)

    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr)

    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer,
        num_warmup_steps=100,
        num_training_steps=(len(train_dataloader) * num_epochs) // gradient_accumulation_steps,
    )

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # Now we train the model
    for epoch in range(num_epochs):
        model.train()
        for step, batch in enumerate(train_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            outputs = model(**batch)
            loss = outputs.loss
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss)
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

        model.eval()
        samples_seen = 0
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather((predictions, batch["labels"]))
            # New Code #
            # First we check if it's a distributed system
            if accelerator.use_distributed:
                # Then see if we're on the last batch of our eval dataloader
                if step == len(eval_dataloader) - 1:
                    # Last batch needs to be truncated on distributed systems as it contains additional samples
                    predictions = predictions[: len(eval_dataloader.dataset) - samples_seen]
                    references = references[: len(eval_dataloader.dataset) - samples_seen]
                else:
                    # Otherwise we add the number of samples seen
                    samples_seen += references.shape[0]
            # All of this can be avoided if you use `Accelerator.gather_for_metrics` instead of `Accelerator.gather`:
            # accelerator.gather_for_metrics((predictions, batch["labels"]))
            metric.add_batch(
                predictions=predictions,
                references=references,
            )

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)
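
# A minimal sketch of the `gather_for_metrics` shortcut mentioned in the comments above.
# It performs the last-batch truncation automatically, so the eval-loop bookkeeping reduces to
# (names refer to the loop above; illustrative, not part of the original script):
#
#     predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
#     metric.add_batch(predictions=predictions, references=references)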

def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
| 709
|
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_yolos import YolosImageProcessor
logger = logging.get_logger(__name__)
class YolosFeatureExtractor(YolosImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class YolosFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use YolosImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
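
# Migration is a drop-in rename to the replacement class, e.g. (checkpoint name illustrative):
#
#     from transformers import YolosImageProcessor
#
#     image_processor = YolosImageProcessor.from_pretrained("hustvl/yolos-small")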
| 197
| 0
|