| code | code_codestyle | style_context | style_context_codestyle | label |
|---|---|---|---|---|
| string (82 to 53.2k chars) | int64 (0 to 721) | string (91 to 41.9k chars) | int64 (0 to 699) | int64 (0 to 1) |
def binary_multiply(a: int, b: int) -> int:
    # Multiply a and b using bit shifts (Russian peasant multiplication).
    res = 0
    while b > 0:
        if b & 1:
            res += a
        a += a
        b >>= 1
    return res


def binary_mod_multiply(a: int, b: int, modulus: int) -> int:
    # Same idea, but keep every intermediate result reduced modulo `modulus`.
    res = 0
    while b > 0:
        if b & 1:
            res = ((res % modulus) + (a % modulus)) % modulus
        a += a
        b >>= 1
    return res
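# Quick sanity check; a minimal sketch that assumes the names binary_multiply and
# binary_mod_multiply assigned above (the original snippet left the functions unnamed).
if __name__ == "__main__":
    assert binary_multiply(3, 9) == 27
    assert binary_mod_multiply(3, 4, 5) == (3 * 4) % 5  # == 2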
| 166
|
import random
import unittest
import torch
from diffusers import IFImg2ImgSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class IFImg2ImgSuperResolutionPipelineFastTests(PipelineTesterMixin, IFPipelineTesterMixin, unittest.TestCase):
    pipeline_class = IFImg2ImgSuperResolutionPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"width", "height"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"original_image"})
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}

    def get_dummy_components(self):
        return self._get_superresolution_dummy_components()

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        original_image = floats_tensor((1, 3, 16, 16), rng=random.Random(seed)).to(device)

        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "original_image": original_image,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)

    def test_save_load_optional_components(self):
        self._test_save_load_optional_components()

    @unittest.skipIf(torch_device != "cuda", reason="float16 requires CUDA")
    def test_save_load_float16(self):
        # Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
        super().test_save_load_float16(expected_max_diff=1e-1)

    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(expected_max_diff=1e-2)

    def test_save_load_local(self):
        self._test_save_load_local()

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(expected_max_diff=1e-2)
| 166
| 1
|
'''simple docstring'''
import warnings
from diffusers import StableDiffusionImg2ImgPipeline  # noqa: F401
warnings.warn(
'The `image_to_image.py` script is outdated. Please use directly `from diffusers import'
' StableDiffusionImg2ImgPipeline` instead.'
)
| 707
|
'''simple docstring'''
from __future__ import annotations
import requests
def get_hackernews_story(story_id: str) -> dict:
    url = f"https://hacker-news.firebaseio.com/v0/item/{story_id}.json?print=pretty"
    return requests.get(url).json()


def hackernews_top_stories(max_stories: int = 10) -> list[dict]:
    url = "https://hacker-news.firebaseio.com/v0/topstories.json?print=pretty"
    story_ids = requests.get(url).json()[:max_stories]
    return [get_hackernews_story(story_id) for story_id in story_ids]


def hackernews_top_stories_as_markdown(max_stories: int = 10) -> str:
    stories = hackernews_top_stories(max_stories)
    return "\n".join("* [{title}]({url})".format(**story) for story in stories)
if __name__ == "__main__":
print(hackernews_top_stories_as_markdown())
| 358
| 0
|
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from ..utils import cached_file
# docstyle-ignore
CHAT_MESSAGE_PROMPT = """
Human: <<task>>

Assistant: """


DEFAULT_PROMPTS_REPO = "huggingface-tools/default-prompts"
PROMPT_FILES = {"chat": "chat_prompt_template.txt", "run": "run_prompt_template.txt"}


def download_prompt(prompt_or_repo_id, agent_name, mode="run"):
    """Download and cache the prompt from a repo, or return `prompt_or_repo_id` itself if it already is a prompt."""
    if prompt_or_repo_id is None:
        prompt_or_repo_id = DEFAULT_PROMPTS_REPO

    # prompt is considered a repo ID when it does not contain any kind of space
    if re.search("\\s", prompt_or_repo_id) is not None:
        return prompt_or_repo_id

    prompt_file = cached_file(
        prompt_or_repo_id, PROMPT_FILES[mode], repo_type="dataset", user_agent={"agent": agent_name}
    )
    with open(prompt_file, "r", encoding="utf-8") as f:
        return f.read()
| 251
|
'''simple docstring'''
import logging
from transformers.configuration_utils import PretrainedConfig
logger = logging.getLogger(__name__)
class MaskedBertConfig(PretrainedConfig):
    """A class replicating `BertConfig` with additional parameters for pruning/masking."""

    model_type = "masked_bert"

    def __init__(
        self, vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12,
        intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1,
        max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12,
        pad_token_id=0, pruning_method="topK", mask_init="constant", mask_scale=0.0, **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.pruning_method = pruning_method
        self.mask_init = mask_init
        self.mask_scale = mask_scale
| 251
| 1
|
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_fnet import FNetTokenizer
else:
    FNetTokenizer = None

logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "google/fnet-base": "https://huggingface.co/google/fnet-base/resolve/main/spiece.model",
        "google/fnet-large": "https://huggingface.co/google/fnet-large/resolve/main/spiece.model",
    },
    "tokenizer_file": {
        "google/fnet-base": "https://huggingface.co/google/fnet-base/resolve/main/tokenizer.json",
        "google/fnet-large": "https://huggingface.co/google/fnet-large/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/fnet-base": 512,
    "google/fnet-large": 512,
}

SPIECE_UNDERLINE = "▁"
class FNetTokenizerFast(PreTrainedTokenizerFast):
    """Construct a "fast" FNet tokenizer (backed by HuggingFace's tokenizers library), based on SentencePiece."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "token_type_ids"]
    slow_tokenizer_class = FNetTokenizer

    def __init__(
        self, vocab_file=None, tokenizer_file=None, do_lower_case=False, remove_space=True, keep_accents=True,
        unk_token="<unk>", sep_token="[SEP]", pad_token="<pad>", cls_token="[CLS]", mask_token="[MASK]", **kwargs,
    ):
        # Mask token behave like a normal word, i.e. include the space before it and
        # is included in the raw text, there should be a match in a non-normalized sentence.
        mask_token = (
            AddedToken(mask_token, lstrip=True, rstrip=False, normalized=False)
            if isinstance(mask_token, str)
            else mask_token
        )

        super().__init__(
            vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, remove_space=remove_space,
            keep_accents=keep_accents, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token,
            cls_token=cls_token, mask_token=mask_token, **kwargs,
        )

        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
| 713
|
import os
import sys
import unittest
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, """utils"""))
import check_dummies # noqa: E402
from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init # noqa: E402
# Align TRANSFORMERS_PATH in check_dummies with the current path
check_dummies.PATH_TO_DIFFUSERS = os.path.join(git_repo_path, "src", "diffusers")
class UpperCAmelCase_ ( unittest.TestCase ):
'''simple docstring'''
def _UpperCAmelCase ( self : Any ) -> Optional[Any]:
SCREAMING_SNAKE_CASE = find_backend(""" if not is_torch_available():""" )
self.assertEqual(a , """torch""" )
# backend_with_underscore = find_backend(" if not is_tensorflow_text_available():")
# self.assertEqual(backend_with_underscore, "tensorflow_text")
SCREAMING_SNAKE_CASE = find_backend(""" if not (is_torch_available() and is_transformers_available()):""" )
self.assertEqual(a , """torch_and_transformers""" )
# double_backend_with_underscore = find_backend(
# " if not (is_sentencepiece_available() and is_tensorflow_text_available()):"
# )
# self.assertEqual(double_backend_with_underscore, "sentencepiece_and_tensorflow_text")
SCREAMING_SNAKE_CASE = find_backend(
""" if not (is_torch_available() and is_transformers_available() and is_onnx_available()):""" )
self.assertEqual(a , """torch_and_transformers_and_onnx""" )
def _UpperCAmelCase ( self : List[str] ) -> int:
SCREAMING_SNAKE_CASE = read_init()
# We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects
self.assertIn("""torch""" , a )
self.assertIn("""torch_and_transformers""" , a )
self.assertIn("""flax_and_transformers""" , a )
self.assertIn("""torch_and_transformers_and_onnx""" , a )
# Likewise, we can't assert on the exact content of a key
self.assertIn("""UNet2DModel""" , objects["""torch"""] )
self.assertIn("""FlaxUNet2DConditionModel""" , objects["""flax"""] )
self.assertIn("""StableDiffusionPipeline""" , objects["""torch_and_transformers"""] )
self.assertIn("""FlaxStableDiffusionPipeline""" , objects["""flax_and_transformers"""] )
self.assertIn("""LMSDiscreteScheduler""" , objects["""torch_and_scipy"""] )
self.assertIn("""OnnxStableDiffusionPipeline""" , objects["""torch_and_transformers_and_onnx"""] )
def _UpperCAmelCase ( self : Any ) -> int:
SCREAMING_SNAKE_CASE = create_dummy_object("""CONSTANT""" , """'torch'""" )
self.assertEqual(a , """\nCONSTANT = None\n""" )
SCREAMING_SNAKE_CASE = create_dummy_object("""function""" , """'torch'""" )
self.assertEqual(
a , """\ndef function(*args, **kwargs):\n requires_backends(function, 'torch')\n""" )
SCREAMING_SNAKE_CASE = """
class FakeClass(metaclass=DummyObject):
_backends = 'torch'
def __init__(self, *args, **kwargs):
requires_backends(self, 'torch')
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, 'torch')
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, 'torch')
"""
SCREAMING_SNAKE_CASE = create_dummy_object("""FakeClass""" , """'torch'""" )
self.assertEqual(a , a )
def _UpperCAmelCase ( self : Union[str, Any] ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE = """# This file is autogenerated by the command `make fix-copies`, do not edit.
from ..utils import DummyObject, requires_backends
CONSTANT = None
def function(*args, **kwargs):
requires_backends(function, [\"torch\"])
class FakeClass(metaclass=DummyObject):
_backends = [\"torch\"]
def __init__(self, *args, **kwargs):
requires_backends(self, [\"torch\"])
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, [\"torch\"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, [\"torch\"])
"""
SCREAMING_SNAKE_CASE = create_dummy_files({"""torch""": ["""CONSTANT""", """function""", """FakeClass"""]} )
self.assertEqual(dummy_files["""torch"""] , a )
| 450
| 0
|
import numpy as np
import skfuzzy as fuzz
if __name__ == "__main__":
# Create universe of discourse in Python using linspace ()
X = np.linspace(start=0, stop=75, num=75, endpoint=True, retstep=False)
# Create two fuzzy sets by defining any membership function
# (trapmf(), gbellmf(), gaussmf(), etc).
abc1 = [0, 25, 50]
abc2 = [25, 50, 75]
young = fuzz.membership.trimf(X, abc1)
middle_aged = fuzz.membership.trimf(X, abc2)
# Compute the different operations using inbuilt functions.
one = np.ones(75)
zero = np.zeros((75,))
# 1. Union = max(µA(x), µB(x))
union = fuzz.fuzzy_or(X, young, X, middle_aged)[1]
# 2. Intersection = min(µA(x), µB(x))
intersection = fuzz.fuzzy_and(X, young, X, middle_aged)[1]
# 3. Complement (A) = (1- min(µA(x))
complement_a = fuzz.fuzzy_not(young)
# 4. Difference (A/B) = min(µA(x),(1- µB(x)))
difference = fuzz.fuzzy_and(X, young, X, fuzz.fuzzy_not(middle_aged)[1])[1]
# 5. Algebraic Sum = [µA(x) + µB(x) – (µA(x) * µB(x))]
alg_sum = young + middle_aged - (young * middle_aged)
# 6. Algebraic Product = (µA(x) * µB(x))
alg_product = young * middle_aged
# 7. Bounded Sum = min[1,(µA(x), µB(x))]
bdd_sum = fuzz.fuzzy_and(X, one, X, young + middle_aged)[1]
# 8. Bounded difference = min[0,(µA(x), µB(x))]
bdd_difference = fuzz.fuzzy_or(X, zero, X, young - middle_aged)[1]
# max-min composition
# max-product composition
# Plot each set A, set B and each operation result using plot() and subplot().
from matplotlib import pyplot as plt
plt.figure()
plt.subplot(4, 3, 1)
plt.plot(X, young)
plt.title('Young')
plt.grid(True)
plt.subplot(4, 3, 2)
plt.plot(X, middle_aged)
plt.title('Middle aged')
plt.grid(True)
plt.subplot(4, 3, 3)
plt.plot(X, union)
plt.title('union')
plt.grid(True)
plt.subplot(4, 3, 4)
plt.plot(X, intersection)
plt.title('intersection')
plt.grid(True)
plt.subplot(4, 3, 5)
plt.plot(X, complement_a)
plt.title('complement_a')
plt.grid(True)
plt.subplot(4, 3, 6)
plt.plot(X, difference)
plt.title('difference a/b')
plt.grid(True)
plt.subplot(4, 3, 7)
plt.plot(X, alg_sum)
plt.title('alg_sum')
plt.grid(True)
plt.subplot(4, 3, 8)
plt.plot(X, alg_product)
plt.title('alg_product')
plt.grid(True)
plt.subplot(4, 3, 9)
plt.plot(X, bdd_sum)
plt.title('bdd_sum')
plt.grid(True)
plt.subplot(4, 3, 10)
plt.plot(X, bdd_difference)
plt.title('bdd_difference')
plt.grid(True)
plt.subplots_adjust(hspace=0.5)
plt.show()
| 86
|
import random
import unittest
from torch.utils.data import BatchSampler, DataLoader, IterableDataset
from accelerate import Accelerator
from accelerate.data_loader import (
BatchSamplerShard,
DataLoaderDispatcher,
DataLoaderShard,
IterableDatasetShard,
SkipBatchSampler,
SkipDataLoader,
skip_first_batches,
)
class RandomIterableDataset(IterableDataset):
    # For testing: an iterable dataset of random length that yields increasing integers.
    def __init__(self, p_stop=0.01, max_length=1000):
        self.p_stop = p_stop
        self.max_length = max_length

    def __iter__(self):
        count = 0
        stop = False
        while not stop and count < self.max_length:
            yield count
            count += 1
            stop = random.random() < self.p_stop
class BatchSamplerShardTest(unittest.TestCase):
    def check_batch_sampler_shards(self, batch_sampler, expected, split_batches=False, even_batches=True):
        batch_sampler_shards = [
            BatchSamplerShard(batch_sampler, 2, i, split_batches=split_batches, even_batches=even_batches)
            for i in range(2)
        ]
        batch_sampler_lists = [list(batch_sampler_shard) for batch_sampler_shard in batch_sampler_shards]
        if not split_batches:
            self.assertListEqual([len(shard) for shard in batch_sampler_shards], [len(e) for e in expected])
        self.assertListEqual(batch_sampler_lists, expected)
def __lowercase ( self ) -> Any:
'''simple docstring'''
__snake_case :Optional[Any] = BatchSampler(range(24 ) , batch_size=3 , drop_last=a__ )
__snake_case :Dict = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]],
]
self.check_batch_sampler_shards(a__ , a__ )
__snake_case :Optional[Any] = BatchSampler(range(24 ) , batch_size=3 , drop_last=a__ )
# Expected shouldn't change
self.check_batch_sampler_shards(a__ , a__ )
# Check the shards when the dataset is a round multiple of batch size but not total batch size.
__snake_case :List[str] = BatchSampler(range(21 ) , batch_size=3 , drop_last=a__ )
__snake_case :Optional[Any] = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [0, 1, 2]],
]
self.check_batch_sampler_shards(a__ , a__ )
__snake_case :List[Any] = BatchSampler(range(21 ) , batch_size=3 , drop_last=a__ )
__snake_case :Optional[int] = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(a__ , a__ )
# Check the shards when the dataset is not a round multiple of batch size but has a multiple of
# num_processes batch.
__snake_case :Optional[Any] = BatchSampler(range(22 ) , batch_size=3 , drop_last=a__ )
__snake_case :List[str] = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 0, 1]],
]
self.check_batch_sampler_shards(a__ , a__ )
__snake_case :List[Any] = BatchSampler(range(22 ) , batch_size=3 , drop_last=a__ )
__snake_case :List[Any] = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(a__ , a__ )
# Check the shards when the dataset is not a round multiple of batch size but and has not a multiple of
# num_processes batch.
__snake_case :List[str] = BatchSampler(range(20 ) , batch_size=3 , drop_last=a__ )
__snake_case :int = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 0]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [1, 2, 3]],
]
self.check_batch_sampler_shards(a__ , a__ )
__snake_case :Optional[Any] = BatchSampler(range(20 ) , batch_size=3 , drop_last=a__ )
__snake_case :Optional[int] = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(a__ , a__ )
# Check the shards when the dataset is very small.
__snake_case :Dict = BatchSampler(range(2 ) , batch_size=3 , drop_last=a__ )
__snake_case :Optional[Any] = [[[0, 1, 0]], [[1, 0, 1]]]
self.check_batch_sampler_shards(a__ , a__ )
__snake_case :Union[str, Any] = BatchSampler(range(2 ) , batch_size=3 , drop_last=a__ )
__snake_case :List[str] = [[], []]
self.check_batch_sampler_shards(a__ , a__ )
def __lowercase ( self ) -> Dict:
'''simple docstring'''
__snake_case :Optional[int] = BatchSampler(range(24 ) , batch_size=4 , drop_last=a__ )
__snake_case :Any = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]],
]
self.check_batch_sampler_shards(a__ , a__ , split_batches=a__ )
__snake_case :List[Any] = BatchSampler(range(24 ) , batch_size=4 , drop_last=a__ )
# Expected shouldn't change
self.check_batch_sampler_shards(a__ , a__ , split_batches=a__ )
# Check the shards when the dataset is not a round multiple of batch size.
__snake_case :List[Any] = BatchSampler(range(22 ) , batch_size=4 , drop_last=a__ )
__snake_case :Optional[int] = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [0, 1]],
]
self.check_batch_sampler_shards(a__ , a__ , split_batches=a__ )
__snake_case :List[Any] = BatchSampler(range(22 ) , batch_size=4 , drop_last=a__ )
__snake_case :Union[str, Any] = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(a__ , a__ , split_batches=a__ )
# Check the shards when the dataset is not a round multiple of batch size or num_processes.
__snake_case :Union[str, Any] = BatchSampler(range(21 ) , batch_size=4 , drop_last=a__ )
__snake_case :Union[str, Any] = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 0]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [1, 2]],
]
self.check_batch_sampler_shards(a__ , a__ , split_batches=a__ )
__snake_case :Any = BatchSampler(range(21 ) , batch_size=4 , drop_last=a__ )
__snake_case :Tuple = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(a__ , a__ , split_batches=a__ )
# Check the shards when the dataset is very small.
__snake_case :str = BatchSampler(range(2 ) , batch_size=4 , drop_last=a__ )
__snake_case :Dict = [[[0, 1]], [[0, 1]]]
self.check_batch_sampler_shards(a__ , a__ , split_batches=a__ )
__snake_case :Optional[Any] = BatchSampler(range(2 ) , batch_size=4 , drop_last=a__ )
__snake_case :List[Any] = [[], []]
self.check_batch_sampler_shards(a__ , a__ , split_batches=a__ )
def __lowercase ( self ) -> List[Any]:
'''simple docstring'''
__snake_case :Tuple = BatchSampler(range(24 ) , batch_size=3 , drop_last=a__ )
__snake_case :int = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]],
]
self.check_batch_sampler_shards(a__ , a__ , even_batches=a__ )
__snake_case :List[Any] = BatchSampler(range(24 ) , batch_size=3 , drop_last=a__ )
# Expected shouldn't change
self.check_batch_sampler_shards(a__ , a__ , even_batches=a__ )
# Check the shards when the dataset is a round multiple of batch size but not total batch size.
__snake_case :Any = BatchSampler(range(21 ) , batch_size=3 , drop_last=a__ )
__snake_case :List[str] = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(a__ , a__ , even_batches=a__ )
__snake_case :Tuple = BatchSampler(range(21 ) , batch_size=3 , drop_last=a__ )
__snake_case :Dict = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(a__ , a__ , even_batches=a__ )
# Check the shards when the dataset is not a round multiple of batch size but has a multiple of
# num_processes batch.
__snake_case :int = BatchSampler(range(22 ) , batch_size=3 , drop_last=a__ )
__snake_case :Optional[Any] = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21]],
]
self.check_batch_sampler_shards(a__ , a__ , even_batches=a__ )
__snake_case :Tuple = BatchSampler(range(22 ) , batch_size=3 , drop_last=a__ )
__snake_case :Optional[Any] = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(a__ , a__ , even_batches=a__ )
# Check the shards when the dataset is not a round multiple of batch size but and has not a multiple of
# num_processes batch.
__snake_case :Dict = BatchSampler(range(20 ) , batch_size=3 , drop_last=a__ )
__snake_case :int = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(a__ , a__ , even_batches=a__ )
__snake_case :Optional[int] = BatchSampler(range(20 ) , batch_size=3 , drop_last=a__ )
__snake_case :List[Any] = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(a__ , a__ , even_batches=a__ )
# Check the shards when the dataset is very small.
__snake_case :Union[str, Any] = BatchSampler(range(2 ) , batch_size=3 , drop_last=a__ )
__snake_case :List[Any] = [[[0, 1]], []]
self.check_batch_sampler_shards(a__ , a__ , even_batches=a__ )
__snake_case :List[str] = BatchSampler(range(2 ) , batch_size=3 , drop_last=a__ )
__snake_case :int = [[], []]
self.check_batch_sampler_shards(a__ , a__ , even_batches=a__ )
def __lowercase ( self ) -> Dict:
'''simple docstring'''
__snake_case :int = BatchSampler(range(24 ) , batch_size=4 , drop_last=a__ )
__snake_case :Optional[int] = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]],
]
self.check_batch_sampler_shards(a__ , a__ , split_batches=a__ , even_batches=a__ )
__snake_case :Dict = BatchSampler(range(24 ) , batch_size=4 , drop_last=a__ )
# Expected shouldn't change
self.check_batch_sampler_shards(a__ , a__ , split_batches=a__ , even_batches=a__ )
# Check the shards when the dataset is not a round multiple of batch size.
__snake_case :List[Any] = BatchSampler(range(22 ) , batch_size=4 , drop_last=a__ )
__snake_case :Dict = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(a__ , a__ , split_batches=a__ , even_batches=a__ )
__snake_case :Dict = BatchSampler(range(22 ) , batch_size=4 , drop_last=a__ )
__snake_case :Dict = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(a__ , a__ , split_batches=a__ , even_batches=a__ )
# Check the shards when the dataset is not a round multiple of batch size or num_processes.
__snake_case :Dict = BatchSampler(range(21 ) , batch_size=4 , drop_last=a__ )
__snake_case :List[Any] = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(a__ , a__ , split_batches=a__ , even_batches=a__ )
__snake_case :int = BatchSampler(range(21 ) , batch_size=4 , drop_last=a__ )
__snake_case :Tuple = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(a__ , a__ , split_batches=a__ , even_batches=a__ )
# Check the shards when the dataset is very small.
__snake_case :List[str] = BatchSampler(range(2 ) , batch_size=4 , drop_last=a__ )
__snake_case :List[str] = [[[0, 1]], []]
self.check_batch_sampler_shards(a__ , a__ , split_batches=a__ , even_batches=a__ )
__snake_case :Any = BatchSampler(range(2 ) , batch_size=4 , drop_last=a__ )
__snake_case :List[str] = [[], []]
self.check_batch_sampler_shards(a__ , a__ , split_batches=a__ , even_batches=a__ )
def __lowercase ( self ) -> Optional[Any]:
'''simple docstring'''
__snake_case :str = [[0, 1, 2], [3, 4], [5, 6, 7, 8], [9, 10, 11], [12, 13]]
__snake_case :List[str] = [BatchSamplerShard(a__ , 2 , a__ , even_batches=a__ ) for i in range(2 )]
self.assertEqual(len(batch_sampler_shards[0] ) , 3 )
self.assertEqual(len(batch_sampler_shards[1] ) , 2 )
self.assertListEqual(list(batch_sampler_shards[0] ) , [[0, 1, 2], [5, 6, 7, 8], [12, 13]] )
self.assertListEqual(list(batch_sampler_shards[1] ) , [[3, 4], [9, 10, 11]] )
def __lowercase ( self , a__ , a__ , a__ , a__=False , a__=2 , a__=False ) -> List[str]:
'''simple docstring'''
random.seed(a__ )
__snake_case :Optional[int] = list(a__ )
__snake_case :Union[str, Any] = [
IterableDatasetShard(
a__ , batch_size=a__ , drop_last=a__ , num_processes=a__ , process_index=a__ , split_batches=a__ , )
for i in range(a__ )
]
__snake_case :Any = []
for iterable_dataset_shard in iterable_dataset_shards:
# Since our random iterable dataset will be... random... we need to use a seed to get reproducible results.
random.seed(a__ )
iterable_dataset_lists.append(list(a__ ) )
__snake_case :Union[str, Any] = batch_size // num_processes if split_batches else batch_size
# All iterable dataset shard should have the same length, a round multiple of shard_batch_size
__snake_case :str = iterable_dataset_lists[0]
for l in iterable_dataset_lists[1:]:
self.assertEqual(len(a__ ) , len(a__ ) )
self.assertTrue(len(a__ ) % shard_batch_size == 0 )
__snake_case :int = []
for idx in range(0 , len(a__ ) , a__ ):
for l in iterable_dataset_lists:
observed += l[idx : idx + shard_batch_size]
if not drop_last:
while len(a__ ) < len(a__ ):
reference += reference
self.assertListEqual(a__ , reference[: len(a__ )] )
def __lowercase ( self ) -> Any:
'''simple docstring'''
__snake_case :int = 42
__snake_case :Tuple = RandomIterableDataset()
self.check_iterable_dataset_shards(a__ , a__ , batch_size=4 , drop_last=a__ , split_batches=a__ )
self.check_iterable_dataset_shards(a__ , a__ , batch_size=4 , drop_last=a__ , split_batches=a__ )
self.check_iterable_dataset_shards(a__ , a__ , batch_size=4 , drop_last=a__ , split_batches=a__ )
self.check_iterable_dataset_shards(a__ , a__ , batch_size=4 , drop_last=a__ , split_batches=a__ )
# Edge case with a very small dataset
__snake_case :Optional[int] = RandomIterableDataset(max_length=2 )
self.check_iterable_dataset_shards(a__ , a__ , batch_size=4 , drop_last=a__ , split_batches=a__ )
self.check_iterable_dataset_shards(a__ , a__ , batch_size=4 , drop_last=a__ , split_batches=a__ )
self.check_iterable_dataset_shards(a__ , a__ , batch_size=4 , drop_last=a__ , split_batches=a__ )
self.check_iterable_dataset_shards(a__ , a__ , batch_size=4 , drop_last=a__ , split_batches=a__ )
def __lowercase ( self ) -> Dict:
'''simple docstring'''
__snake_case :str = BatchSampler(range(16 ) , batch_size=4 , drop_last=a__ )
__snake_case :Optional[int] = SkipBatchSampler(a__ , 2 )
self.assertListEqual(list(a__ ) , [[8, 9, 10, 11], [12, 13, 14, 15]] )
def __lowercase ( self ) -> str:
'''simple docstring'''
__snake_case :str = SkipDataLoader(list(range(16 ) ) , batch_size=4 , skip_batches=2 )
self.assertListEqual([t.tolist() for t in dataloader] , [[8, 9, 10, 11], [12, 13, 14, 15]] )
def __lowercase ( self ) -> Dict:
'''simple docstring'''
__snake_case :Union[str, Any] = DataLoader(list(range(16 ) ) , batch_size=4 )
__snake_case :Dict = skip_first_batches(a__ , num_batches=2 )
self.assertListEqual([t.tolist() for t in new_dataloader] , [[8, 9, 10, 11], [12, 13, 14, 15]] )
def __lowercase ( self ) -> Optional[Any]:
'''simple docstring'''
__snake_case :Any = DataLoaderShard(list(range(16 ) ) , batch_size=4 )
for idx, _ in enumerate(a__ ):
self.assertEqual(dataloader.end_of_dataloader , idx == 3 )
# Test it also works on the second iteration
for idx, _ in enumerate(a__ ):
self.assertEqual(dataloader.end_of_dataloader , idx == 3 )
def __lowercase ( self ) -> Any:
'''simple docstring'''
Accelerator()
__snake_case :Union[str, Any] = DataLoaderDispatcher(range(16 ) , batch_size=4 )
for idx, _ in enumerate(a__ ):
self.assertEqual(dataloader.end_of_dataloader , idx == 3 )
# Test it also works on the second iteration
for idx, _ in enumerate(a__ ):
self.assertEqual(dataloader.end_of_dataloader , idx == 3 )
| 455
| 0
|
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_donut import DonutImageProcessor
logger = logging.get_logger(__name__)
class DonutFeatureExtractor(DonutImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class DonutFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use DonutImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 216
|
'''simple docstring'''
from __future__ import annotations
from collections import Counter
from random import random
class MarkovChainGraphUndirectedUnweighted:
    """Undirected, unweighted graph for running the Markov chain algorithm."""

    def __init__(self) -> None:
        self.connections = {}

    def add_node(self, node: str) -> None:
        self.connections[node] = {}

    def add_transition_probability(self, node1: str, node2: str, probability: float) -> None:
        if node1 not in self.connections:
            self.add_node(node1)
        if node2 not in self.connections:
            self.add_node(node2)
        self.connections[node1][node2] = probability

    def get_nodes(self) -> list[str]:
        return list(self.connections)

    def transition(self, node: str) -> str:
        current_probability = 0
        random_value = random()
        for dest in self.connections[node]:
            current_probability += self.connections[node][dest]
            if current_probability > random_value:
                return dest
        return ""


def get_transitions(start: str, transitions: list[tuple[str, str, float]], steps: int) -> dict[str, int]:
    """Run the Markov chain and count how many times each node is visited."""
    graph = MarkovChainGraphUndirectedUnweighted()
    for node1, node2, probability in transitions:
        graph.add_transition_probability(node1, node2, probability)
    visited = Counter(graph.get_nodes())
    node = start
    for _ in range(steps):
        node = graph.transition(node)
        visited[node] += 1
    return visited
if __name__ == "__main__":
import doctest
doctest.testmod()
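    # Illustrative usage, not part of the original file; the transition probabilities below are made up.
    example_transitions = [("a", "a", 0.9), ("a", "b", 0.1), ("b", "a", 0.5), ("b", "b", 0.5)]
    print(get_transitions("a", example_transitions, 1000))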
| 216
| 1
|
def is_palindrome(head):
    if not head:
        return True
    # split the list to two parts
    fast, slow = head.next, head
    while fast and fast.next:
        fast = fast.next.next
        slow = slow.next
    second = slow.next
    slow.next = None  # Don't forget here! But forget still works!
    # reverse the second part
    node = None
    while second:
        nxt = second.next
        second.next = node
        node = second
        second = nxt
    # compare two parts
    # second part has the same or one less node
    while node:
        if node.val != head.val:
            return False
        node = node.next
        head = head.next
    return True


def is_palindrome_stack(head):
    if not head or not head.next:
        return True
    # 1. Get the midpoint (slow)
    slow = fast = cur = head
    while fast and fast.next:
        fast, slow = fast.next.next, slow.next
    # 2. Push the second half into the stack
    stack = [slow.val]
    while slow.next:
        slow = slow.next
        stack.append(slow.val)
    # 3. Comparison
    while stack:
        if stack.pop() != cur.val:
            return False
        cur = cur.next
    return True


def is_palindrome_dict(head):
    if not head or not head.next:
        return True
    d = {}
    pos = 0
    while head:
        if head.val in d:
            d[head.val].append(pos)
        else:
            d[head.val] = [pos]
        head = head.next
        pos += 1
    checksum = pos - 1
    middle = 0
    for v in d.values():
        if len(v) % 2 != 0:
            middle += 1
        else:
            step = 0
            for i in range(0, len(v)):
                if v[i] + v[len(v) - 1 - step] != checksum:
                    return False
                step += 1
        if middle > 1:
            return False
    return True
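# The three checks above assume a singly linked list node exposing `.val` and `.next`;
# the original snippet does not define one. A minimal sketch for trying them out
# (the Node class and the sample lists below are illustrative, not part of the source):
class Node:
    def __init__(self, val, next_node=None):
        self.val = val
        self.next = next_node


if __name__ == "__main__":

    def build(values):
        head = None
        for value in reversed(values):
            head = Node(value, head)
        return head

    # is_palindrome() splits and reverses the list in place, so build a fresh list per check.
    print(is_palindrome(build([1, 2, 2, 1])))  # True
    print(is_palindrome_stack(build([1, 2, 3])))  # False
    print(is_palindrome_dict(build([1, 2, 1])))  # True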
| 529
|
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class UpperCAmelCase_ ( _A ):
'''simple docstring'''
a__ = ["""image_processor""", """tokenizer"""]
a__ = """BridgeTowerImageProcessor"""
a__ = ("""RobertaTokenizer""", """RobertaTokenizerFast""")
def __init__( self : Optional[Any] , UpperCamelCase__ : str , UpperCamelCase__ : Any ) -> Optional[int]:
"""simple docstring"""
super().__init__(UpperCamelCase__ , UpperCamelCase__ )
def __call__( self : Optional[int] , UpperCamelCase__ : Tuple , UpperCamelCase__ : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , UpperCamelCase__ : bool = True , UpperCamelCase__ : Union[bool, str, PaddingStrategy] = False , UpperCamelCase__ : Union[bool, str, TruncationStrategy] = None , UpperCamelCase__ : Optional[int] = None , UpperCamelCase__ : int = 0 , UpperCamelCase__ : Optional[int] = None , UpperCamelCase__ : Optional[bool] = None , UpperCamelCase__ : Optional[bool] = None , UpperCamelCase__ : bool = False , UpperCamelCase__ : bool = False , UpperCamelCase__ : bool = False , UpperCamelCase__ : bool = False , UpperCamelCase__ : bool = True , UpperCamelCase__ : Optional[Union[str, TensorType]] = None , **UpperCamelCase__ : Dict , ) -> BatchEncoding:
"""simple docstring"""
__magic_name__ = self.tokenizer(
text=UpperCamelCase__ , add_special_tokens=UpperCamelCase__ , padding=UpperCamelCase__ , truncation=UpperCamelCase__ , max_length=UpperCamelCase__ , stride=UpperCamelCase__ , pad_to_multiple_of=UpperCamelCase__ , return_token_type_ids=UpperCamelCase__ , return_attention_mask=UpperCamelCase__ , return_overflowing_tokens=UpperCamelCase__ , return_special_tokens_mask=UpperCamelCase__ , return_offsets_mapping=UpperCamelCase__ , return_length=UpperCamelCase__ , verbose=UpperCamelCase__ , return_tensors=UpperCamelCase__ , **UpperCamelCase__ , )
# add pixel_values + pixel_mask
__magic_name__ = self.image_processor(
UpperCamelCase__ , return_tensors=UpperCamelCase__ , do_normalize=UpperCamelCase__ , do_center_crop=UpperCamelCase__ , **UpperCamelCase__ )
encoding.update(UpperCamelCase__ )
return encoding
def _lowercase ( self : Optional[Any] , *UpperCamelCase__ : Optional[Any] , **UpperCamelCase__ : str ) -> str:
"""simple docstring"""
return self.tokenizer.batch_decode(*UpperCamelCase__ , **UpperCamelCase__ )
def _lowercase ( self : Dict , *UpperCamelCase__ : int , **UpperCamelCase__ : Optional[Any] ) -> str:
"""simple docstring"""
return self.tokenizer.decode(*UpperCamelCase__ , **UpperCamelCase__ )
@property
def _lowercase ( self : Tuple ) -> Union[str, Any]:
"""simple docstring"""
__magic_name__ = self.tokenizer.model_input_names
__magic_name__ = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
| 529
| 1
|
import json
import os
import unittest
from transformers.models.ctrl.tokenization_ctrl import VOCAB_FILES_NAMES, CTRLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class CTRLTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CTRLTokenizer
    test_rust_tokenizer = False
    test_seq2seq = False

    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = ["adapt", "re@@", "a@@", "apt", "c@@", "t", "<unk>"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "a p", "ap t</w>", "r e", "a d", "ad apt</w>", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return CTRLTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "adapt react readapt apt"
        output_text = "adapt react readapt apt"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = CTRLTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "adapt react readapt apt"
        bpe_tokens = "adapt re@@ a@@ c@@ t re@@ adapt apt".split()
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [0, 1, 2, 4, 5, 1, 0, 3, 6]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
| 720
|
# Usage:
# ./gen-card-allenai-wmt16.py
import os
from pathlib import Path
def lowerCamelCase_ ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
lowerCamelCase_ = {
"en": "Machine learning is great, isn't it?",
"ru": "Машинное обучение - это здорово, не так ли?",
"de": "Maschinelles Lernen ist großartig, nicht wahr?",
}
# BLUE scores as follows:
# "pair": [fairseq, transformers]
lowerCamelCase_ = {
"wmt16-en-de-dist-12-1": [28.3, 27.52],
"wmt16-en-de-dist-6-1": [27.4, 27.11],
"wmt16-en-de-12-1": [26.9, 25.75],
}
lowerCamelCase_ = F'{src_lang}-{tgt_lang}'
lowerCamelCase_ = F'\n---\nlanguage:\n- {src_lang}\n- {tgt_lang}\nthumbnail:\ntags:\n- translation\n- wmt16\n- allenai\nlicense: apache-2.0\ndatasets:\n- wmt16\nmetrics:\n- bleu\n---\n\n# FSMT\n\n## Model description\n\nThis is a ported version of fairseq-based [wmt16 transformer](https://github.com/jungokasai/deep-shallow/) for {src_lang}-{tgt_lang}.\n\nFor more details, please, see [Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation](https://arxiv.org/abs/2006.10369).\n\nAll 3 models are available:\n\n* [wmt16-en-de-dist-12-1](https://huggingface.co/allenai/wmt16-en-de-dist-12-1)\n* [wmt16-en-de-dist-6-1](https://huggingface.co/allenai/wmt16-en-de-dist-6-1)\n* [wmt16-en-de-12-1](https://huggingface.co/allenai/wmt16-en-de-12-1)\n\n\n## Intended uses & limitations\n\n#### How to use\n\n```python\nfrom transformers import FSMTForConditionalGeneration, FSMTTokenizer\nmname = "allenai/{model_name}"\ntokenizer = FSMTTokenizer.from_pretrained(mname)\nmodel = FSMTForConditionalGeneration.from_pretrained(mname)\n\ninput = "{texts[src_lang]}"\ninput_ids = tokenizer.encode(input, return_tensors="pt")\noutputs = model.generate(input_ids)\ndecoded = tokenizer.decode(outputs[0], skip_special_tokens=True)\nprint(decoded) # {texts[tgt_lang]}\n\n```\n\n#### Limitations and bias\n\n\n## Training data\n\nPretrained weights were left identical to the original model released by allenai. For more details, please, see the [paper](https://arxiv.org/abs/2006.10369).\n\n## Eval results\n\nHere are the BLEU scores:\n\nmodel | fairseq | transformers\n-------|---------|----------\n{model_name} | {scores[model_name][0]} | {scores[model_name][1]}\n\nThe score is slightly below the score reported in the paper, as the researchers don\'t use `sacrebleu` and measure the score on tokenized outputs. `transformers` score was measured using `sacrebleu` on detokenized outputs.\n\nThe score was calculated using this code:\n\n```bash\ngit clone https://github.com/huggingface/transformers\ncd transformers\nexport PAIR={pair}\nexport DATA_DIR=data/$PAIR\nexport SAVE_DIR=data/$PAIR\nexport BS=8\nexport NUM_BEAMS=5\nmkdir -p $DATA_DIR\nsacrebleu -t wmt16 -l $PAIR --echo src > $DATA_DIR/val.source\nsacrebleu -t wmt16 -l $PAIR --echo ref > $DATA_DIR/val.target\necho $PAIR\nPYTHONPATH="src:examples/seq2seq" python examples/seq2seq/run_eval.py allenai/{model_name} $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS\n```\n\n## Data Sources\n\n- [training, etc.](http://www.statmt.org/wmt16/)\n- [test set](http://matrix.statmt.org/test_sets/newstest2016.tgz?1504722372)\n\n\n### BibTeX entry and citation info\n\n```\n@misc{{kasai2020deep,\n title={{Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation}},\n author={{Jungo Kasai and Nikolaos Pappas and Hao Peng and James Cross and Noah A. Smith}},\n year={{2020}},\n eprint={{2006.10369}},\n archivePrefix={{arXiv}},\n primaryClass={{cs.CL}}\n}}\n```\n\n'
model_card_dir.mkdir(parents=lowerCamelCase__ , exist_ok=lowerCamelCase__ )
lowerCamelCase_ = os.path.join(lowerCamelCase__ , "README.md" )
print(F'Generating {path}' )
with open(lowerCamelCase__ , "w" , encoding="utf-8" ) as f:
f.write(lowerCamelCase__ )
# make sure we are under the root of the project
__A =Path(__file__).resolve().parent.parent.parent
__A =repo_dir / '''model_cards'''
for model_name in ["wmt16-en-de-dist-12-1", "wmt16-en-de-dist-6-1", "wmt16-en-de-12-1"]:
__A =model_cards_dir / '''allenai''' / model_name
write_model_card(model_card_dir, src_lang='''en''', tgt_lang='''de''', model_name=model_name)
| 313
| 0
|
from ...processing_utils import ProcessorMixin
class WhisperProcessor(ProcessorMixin):
    """Wraps a Whisper feature extractor and a Whisper tokenizer into a single processor."""

    feature_extractor_class = "WhisperFeatureExtractor"
    tokenizer_class = "WhisperTokenizer"

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False

    def get_decoder_prompt_ids(self, task=None, language=None, no_timestamps=True):
        return self.tokenizer.get_decoder_prompt_ids(task=task, language=language, no_timestamps=no_timestamps)

    def __call__(self, *args, **kwargs):
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)

        audio = kwargs.pop("audio", None)
        sampling_rate = kwargs.pop("sampling_rate", None)
        text = kwargs.pop("text", None)
        if len(args) > 0:
            audio = args[0]
            args = args[1:]

        if audio is None and text is None:
            raise ValueError("You need to specify either an `audio` or `text` input to process.")

        if audio is not None:
            inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)
        if text is not None:
            encodings = self.tokenizer(text, **kwargs)

        if text is None:
            return inputs
        elif audio is None:
            return encodings
        else:
            inputs["labels"] = encodings["input_ids"]
            return inputs

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    def get_prompt_ids(self, text: str, return_tensors="np"):
        return self.tokenizer.get_prompt_ids(text, return_tensors=return_tensors)
| 101
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
"configuration_chinese_clip": [
"CHINESE_CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
"ChineseCLIPConfig",
"ChineseCLIPOnnxConfig",
"ChineseCLIPTextConfig",
"ChineseCLIPVisionConfig",
],
"processing_chinese_clip": ["ChineseCLIPProcessor"],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase : str = ["ChineseCLIPFeatureExtractor"]
_lowerCAmelCase : Any = ["ChineseCLIPImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_chinese_clip"] = [
"CHINESE_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
"ChineseCLIPModel",
"ChineseCLIPPreTrainedModel",
"ChineseCLIPTextModel",
"ChineseCLIPVisionModel",
]
if TYPE_CHECKING:
from .configuration_chinese_clip import (
CHINESE_CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
ChineseCLIPConfig,
ChineseCLIPOnnxConfig,
ChineseCLIPTextConfig,
ChineseCLIPVisionConfig,
)
from .processing_chinese_clip import ChineseCLIPProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_chinese_clip import ChineseCLIPFeatureExtractor, ChineseCLIPImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_chinese_clip import (
CHINESE_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
ChineseCLIPModel,
ChineseCLIPPreTrainedModel,
ChineseCLIPTextModel,
ChineseCLIPVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 193
| 0
|
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
lowerCamelCase =1_6
lowerCamelCase =3_2
def SCREAMING_SNAKE_CASE_ ( UpperCamelCase__ , UpperCamelCase__ = 1_6 , UpperCamelCase__ = "bert-base-cased" ):
UpperCamelCase__ : Any = AutoTokenizer.from_pretrained(UpperCamelCase__ )
UpperCamelCase__ : Tuple = load_dataset('''glue''' , '''mrpc''' )
def tokenize_function(UpperCamelCase__ ):
# max_length=None => use the model max length (it's actually the default)
UpperCamelCase__ : Union[str, Any] = tokenizer(examples['''sentence1'''] , examples['''sentence2'''] , truncation=UpperCamelCase__ , max_length=UpperCamelCase__ )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
UpperCamelCase__ : Tuple = datasets.map(
UpperCamelCase__ , batched=UpperCamelCase__ , remove_columns=['''idx''', '''sentence1''', '''sentence2'''] , load_from_cache_file=UpperCamelCase__ )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
UpperCamelCase__ : List[Any] = tokenized_datasets.rename_column('''label''' , '''labels''' )
def collate_fn(UpperCamelCase__ ):
# On TPU it's best to pad everything to the same length or training will be very slow.
if accelerator.distributed_type == DistributedType.TPU:
return tokenizer.pad(UpperCamelCase__ , padding='''max_length''' , max_length=1_2_8 , return_tensors='''pt''' )
return tokenizer.pad(UpperCamelCase__ , padding='''longest''' , return_tensors='''pt''' )
# Instantiate dataloaders.
UpperCamelCase__ : List[Any] = DataLoader(
tokenized_datasets['''train'''] , shuffle=UpperCamelCase__ , collate_fn=UpperCamelCase__ , batch_size=UpperCamelCase__ )
UpperCamelCase__ : Tuple = DataLoader(
tokenized_datasets['''validation'''] , shuffle=UpperCamelCase__ , collate_fn=UpperCamelCase__ , batch_size=UpperCamelCase__ )
return train_dataloader, eval_dataloader
def SCREAMING_SNAKE_CASE_ ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
model.eval()
UpperCamelCase__ : Union[str, Any] = 0
for step, batch in enumerate(UpperCamelCase__ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
UpperCamelCase__ : str = model(**UpperCamelCase__ )
UpperCamelCase__ : int = outputs.logits.argmax(dim=-1 )
# It is slightly faster to call this once, than multiple times
UpperCamelCase__ ,UpperCamelCase__ : Optional[int] = accelerator.gather(
(predictions, batch['''labels''']) ) # If we are in a multiprocess environment, the last batch has duplicates
if accelerator.use_distributed:
if step == len(UpperCamelCase__ ) - 1:
UpperCamelCase__ : Union[str, Any] = predictions[: len(eval_dataloader.dataset ) - samples_seen]
UpperCamelCase__ : Any = references[: len(eval_dataloader.dataset ) - samples_seen]
else:
samples_seen += references.shape[0]
metric.add_batch(
predictions=UpperCamelCase__ , references=UpperCamelCase__ , )
UpperCamelCase__ : int = metric.compute()
return eval_metric["accuracy"]
def SCREAMING_SNAKE_CASE_ ( UpperCamelCase__ , UpperCamelCase__ ):
# Initialize accelerator
UpperCamelCase__ : int = Accelerator()
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
UpperCamelCase__ : Any = config['''lr''']
UpperCamelCase__ : Tuple = int(config['''num_epochs'''] )
UpperCamelCase__ : Optional[int] = int(config['''seed'''] )
UpperCamelCase__ : List[Any] = int(config['''batch_size'''] )
UpperCamelCase__ : Any = args.model_name_or_path
set_seed(UpperCamelCase__ )
UpperCamelCase__ ,UpperCamelCase__ : Dict = get_dataloaders(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
UpperCamelCase__ : Any = AutoModelForSequenceClassification.from_pretrained(UpperCamelCase__ , return_dict=UpperCamelCase__ )
# Instantiate optimizer
UpperCamelCase__ : Any = (
AdamW
if accelerator.state.deepspeed_plugin is None
or '''optimizer''' not in accelerator.state.deepspeed_plugin.deepspeed_config
else DummyOptim
)
UpperCamelCase__ : Dict = optimizer_cls(params=model.parameters() , lr=UpperCamelCase__ )
if accelerator.state.deepspeed_plugin is not None:
UpperCamelCase__ : Dict = accelerator.state.deepspeed_plugin.deepspeed_config[
'''gradient_accumulation_steps'''
]
else:
UpperCamelCase__ : str = 1
UpperCamelCase__ : int = (len(UpperCamelCase__ ) * num_epochs) // gradient_accumulation_steps
# Instantiate scheduler
if (
accelerator.state.deepspeed_plugin is None
or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
):
UpperCamelCase__ : Tuple = get_linear_schedule_with_warmup(
optimizer=UpperCamelCase__ , num_warmup_steps=0 , num_training_steps=UpperCamelCase__ , )
else:
UpperCamelCase__ : str = DummyScheduler(UpperCamelCase__ , total_num_steps=UpperCamelCase__ , warmup_num_steps=0 )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
UpperCamelCase__ ,UpperCamelCase__ ,UpperCamelCase__ ,UpperCamelCase__ ,UpperCamelCase__ : Dict = accelerator.prepare(
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
# We need to keep track of how many total steps we have iterated over
UpperCamelCase__ : Tuple = 0
# We also need to keep track of the stating epoch so files are named properly
UpperCamelCase__ : int = 0
UpperCamelCase__ : int = evaluate.load('''glue''' , '''mrpc''' )
UpperCamelCase__ : Dict = num_epochs
if args.partial_train_epoch is not None:
UpperCamelCase__ : Any = args.partial_train_epoch
if args.resume_from_checkpoint:
accelerator.load_state(args.resume_from_checkpoint )
UpperCamelCase__ : Optional[Any] = args.resume_from_checkpoint.split('''epoch_''' )[1]
UpperCamelCase__ : Optional[int] = ''''''
for char in epoch_string:
if char.isdigit():
state_epoch_num += char
else:
break
UpperCamelCase__ : Union[str, Any] = int(UpperCamelCase__ ) + 1
UpperCamelCase__ : List[Any] = evaluation_loop(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
accelerator.print('''resumed checkpoint performance:''' , UpperCamelCase__ )
accelerator.print('''resumed checkpoint\'s scheduler\'s lr:''' , lr_scheduler.get_lr()[0] )
accelerator.print('''resumed optimizers\'s lr:''' , optimizer.param_groups[0]['''lr'''] )
with open(os.path.join(args.output_dir , f'''state_{starting_epoch-1}.json''' ) , '''r''' ) as f:
UpperCamelCase__ : Union[str, Any] = json.load(UpperCamelCase__ )
assert resumed_state["accuracy"] == accuracy, "Accuracy mismatch, loading from checkpoint failed"
assert (
resumed_state["lr"] == lr_scheduler.get_lr()[0]
), "Scheduler learning rate mismatch, loading from checkpoint failed"
assert (
resumed_state["optimizer_lr"] == optimizer.param_groups[0]["lr"]
), "Optimizer learning rate mismatch, loading from checkpoint failed"
assert resumed_state["epoch"] == starting_epoch - 1, "Epoch mismatch, loading from checkpoint failed"
return
# Now we train the model
UpperCamelCase__ : Optional[Any] = {}
for epoch in range(UpperCamelCase__ , UpperCamelCase__ ):
model.train()
for step, batch in enumerate(UpperCamelCase__ ):
UpperCamelCase__ : Dict = model(**UpperCamelCase__ )
UpperCamelCase__ : List[Any] = outputs.loss
UpperCamelCase__ : Optional[Any] = loss / gradient_accumulation_steps
accelerator.backward(UpperCamelCase__ )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
overall_step += 1
UpperCamelCase__ : List[str] = f'''epoch_{epoch}'''
UpperCamelCase__ : Optional[Any] = os.path.join(args.output_dir , UpperCamelCase__ )
accelerator.save_state(UpperCamelCase__ )
UpperCamelCase__ : Any = evaluation_loop(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
UpperCamelCase__ : Any = accuracy
UpperCamelCase__ : Optional[Any] = lr_scheduler.get_lr()[0]
UpperCamelCase__ : Any = optimizer.param_groups[0]['''lr''']
UpperCamelCase__ : Any = epoch
UpperCamelCase__ : str = overall_step
accelerator.print(f'''epoch {epoch}:''' , UpperCamelCase__ )
accelerator.wait_for_everyone()
if accelerator.is_main_process:
with open(os.path.join(args.output_dir , f'''state_{epoch}.json''' ) , '''w''' ) as f:
json.dump(UpperCamelCase__ , UpperCamelCase__ )
def SCREAMING_SNAKE_CASE_ ( ):
UpperCamelCase__ : Optional[int] = argparse.ArgumentParser(description='''Simple example of training script tracking peak GPU memory usage.''' )
parser.add_argument(
'''--model_name_or_path''' , type=UpperCamelCase__ , default='''bert-base-cased''' , help='''Path to pretrained model or model identifier from huggingface.co/models.''' , required=UpperCamelCase__ , )
parser.add_argument(
'''--output_dir''' , type=UpperCamelCase__ , default='''.''' , help='''Optional save directory where all checkpoint folders will be stored. Default is the current working directory.''' , )
parser.add_argument(
'''--resume_from_checkpoint''' , type=UpperCamelCase__ , default=UpperCamelCase__ , help='''If the training should continue from a checkpoint folder.''' , )
parser.add_argument(
'''--partial_train_epoch''' , type=UpperCamelCase__ , default=UpperCamelCase__ , help='''If passed, the training will stop after this number of epochs.''' , )
parser.add_argument(
'''--num_epochs''' , type=UpperCamelCase__ , default=2 , help='''Number of train epochs.''' , )
UpperCamelCase__ : Optional[int] = parser.parse_args()
UpperCamelCase__ : Optional[int] = {'''lr''': 2e-5, '''num_epochs''': args.num_epochs, '''seed''': 4_2, '''batch_size''': 1_6}
training_function(UpperCamelCase__ , UpperCamelCase__ )
if __name__ == "__main__":
main()
| 462
|
import argparse
import json
from typing import List
from ltp import LTP
from transformers.models.bert.tokenization_bert import BertTokenizer
def _is_chinese_char(cp):
# This defines a "chinese character" as anything in the CJK Unicode block:
# https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
#
# Note that the CJK Unicode block is NOT all Japanese and Korean characters,
# despite its name. The modern Korean Hangul alphabet is a different block,
# as is Japanese Hiragana and Katakana. Those alphabets are used to write
# space-separated words, so they are not treated specially and handled
    # like all of the other languages.
if (
(cp >= 0x4e_00 and cp <= 0x9f_ff)
or (cp >= 0x34_00 and cp <= 0x4d_bf) #
or (cp >= 0x2_00_00 and cp <= 0x2_a6_df) #
or (cp >= 0x2_a7_00 and cp <= 0x2_b7_3f) #
or (cp >= 0x2_b7_40 and cp <= 0x2_b8_1f) #
or (cp >= 0x2_b8_20 and cp <= 0x2_ce_af) #
or (cp >= 0xf9_00 and cp <= 0xfa_ff)
or (cp >= 0x2_f8_00 and cp <= 0x2_fa_1f) #
): #
return True
return False
def is_chinese(word: str):
    # Returns 1 only if every character of the word is a Chinese character (e.g. '身高', '神'); otherwise 0 (e.g. '180').
    for char in word:
        cp = ord(char)
        if not _is_chinese_char(cp):
            return 0
    return 1
def get_chinese_word(tokens: List[str]):
    word_set = set()
    for token in tokens:
        chinese_word = len(token) > 1 and is_chinese(token)
        if chinese_word:
            word_set.add(token)
    word_list = list(word_set)
    return word_list
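# `add_sub_symbol` marks whole-word boundaries: it scans the BERT tokens left to right and, whenever a span of
# Chinese tokens matches a word produced by the LTP segmenter, prefixes every token after the first one with
# "##" so whole word masking can treat the span as a single word.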
def add_sub_symbol(bert_tokens: List[str], chinese_word_set: set):
    if not chinese_word_set:
        return bert_tokens
    max_word_len = max([len(w) for w in chinese_word_set])

    bert_word = bert_tokens
    start, end = 0, len(bert_word)
    while start < end:
        single_word = True
        if is_chinese(bert_word[start]):
            max_match_len = min(end - start, max_word_len)
            for i in range(max_match_len, 1, -1):
                whole_word = "".join(bert_word[start : start + i])
                if whole_word in chinese_word_set:
                    for j in range(start + 1, start + i):
                        bert_word[j] = "##" + bert_word[j]
                    start = start + i
                    single_word = False
                    break
        if single_word:
            start += 1
    return bert_word
def prepare_ref(lines: List[str], ltp_tokenizer: LTP, bert_tokenizer: BertTokenizer):
    ltp_res = []
    for i in range(0, len(lines), 100):
        res = ltp_tokenizer.pipeline(lines[i : i + 100], tasks=["cws"]).cws
        res = [get_chinese_word(r) for r in res]
        ltp_res.extend(res)
    assert len(ltp_res) == len(lines)

    bert_res = []
    for i in range(0, len(lines), 100):
        res = bert_tokenizer(lines[i : i + 100], add_special_tokens=True, truncation=True, max_length=512)
        bert_res.extend(res["input_ids"])
    assert len(bert_res) == len(lines)

    ref_ids = []
    for input_ids, chinese_word in zip(bert_res, ltp_res):
        input_tokens = []
        for id in input_ids:
            token = bert_tokenizer._convert_id_to_token(id)
            input_tokens.append(token)
        input_tokens = add_sub_symbol(input_tokens, chinese_word)
        ref_id = []
        # We only keep the positions of Chinese sub-tokens that start with "##", i.e. tokens that are part of a whole word.
        for i, token in enumerate(input_tokens):
            if token[:2] == "##":
                clean_token = token[2:]
                # save chinese tokens' pos
                if len(clean_token) == 1 and _is_chinese_char(ord(clean_token)):
                    ref_id.append(i)
        ref_ids.append(ref_id)

    assert len(ref_ids) == len(bert_res)
    return ref_ids
def main(args):
    # For Chinese (Ro)BERT(a), the best results come from RoBERTa-wwm-ext (https://github.com/ymcui/Chinese-BERT-wwm).
    # To fine-tune these models we have to use the same word segmenter: LTP (https://github.com/HIT-SCIR/ltp).
    with open(args.file_name, "r", encoding="utf-8") as f:
        data = f.readlines()
    data = [line.strip() for line in data if len(line) > 0 and not line.isspace()]  # avoid delimiters like '\u2029'
    ltp_tokenizer = LTP(args.ltp)  # faster on a GPU device
    bert_tokenizer = BertTokenizer.from_pretrained(args.bert)
    ref_ids = prepare_ref(data, ltp_tokenizer, bert_tokenizer)
    with open(args.save_path, "w", encoding="utf-8") as f:
        data = [json.dumps(ref) + "\n" for ref in ref_ids]
        f.writelines(data)
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="prepare_chinese_ref")
parser.add_argument(
"--file_name",
required=False,
type=str,
default="./resources/chinese-demo.txt",
help="file need process, same as training data in lm",
)
parser.add_argument(
"--ltp",
required=False,
type=str,
default="./resources/ltp",
help="resources for LTP tokenizer, usually a path",
)
parser.add_argument(
"--bert",
required=False,
type=str,
default="./resources/robert",
help="resources for Bert tokenizer",
)
parser.add_argument(
"--save_path",
required=False,
type=str,
default="./resources/ref.txt",
help="path to save res",
)
    args = parser.parse_args()
main(args)
| 462
| 1
|
SCREAMING_SNAKE_CASE__ : Optional[int] = "0.21.0"
from .accelerator import Accelerator
from .big_modeling import (
cpu_offload,
cpu_offload_with_hook,
disk_offload,
dispatch_model,
init_empty_weights,
init_on_device,
load_checkpoint_and_dispatch,
)
from .data_loader import skip_first_batches
from .launchers import debug_launcher, notebook_launcher
from .state import PartialState
from .utils import (
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
FullyShardedDataParallelPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
find_executable_batch_size,
infer_auto_device_map,
is_rich_available,
load_checkpoint_in_model,
synchronize_rng_states,
)
if is_rich_available():
from .utils import rich
| 85
|
from transformers import BertTokenizerFast
from .custom_tokenization import CustomTokenizer
class CustomTokenizerFast(BertTokenizerFast):
    """Fast tokenizer whose matching slow implementation is `CustomTokenizer`."""

    slow_tokenizer_class = CustomTokenizer
    pass
| 59
| 0
|
"""simple docstring"""
import argparse
import logging
import os
import time
import timeit
import datasets
import numpy as np
import pycuda.autoinit # noqa: F401
import pycuda.driver as cuda
import tensorrt as trt
import torch
from absl import logging as absl_logging
from accelerate import Accelerator
from datasets import load_dataset, load_metric
from torch.utils.data import DataLoader
from utils_qa import postprocess_qa_predictions
import transformers
from transformers import AutoTokenizer, EvalPrediction, default_data_collator, set_seed
from transformers.trainer_pt_utils import nested_concat, nested_truncate
_lowerCamelCase = trt.Logger(trt.Logger.WARNING)
_lowerCamelCase = absl_logging.get_absl_logger()
absl_logger.setLevel(logging.WARNING)
_lowerCamelCase = logging.getLogger(__name__)
_lowerCamelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--onnx_model_path''',
default=None,
type=str,
required=True,
    help='''Path to the ONNX model.''',
)
parser.add_argument(
'''--output_dir''',
default=None,
type=str,
required=True,
help='''The output directory where the model checkpoints and predictions will be written.''',
)
# Other parameters
parser.add_argument(
'''--tokenizer_name''',
default='''''',
type=str,
required=True,
help='''Pretrained tokenizer name or path if not the same as model_name''',
)
parser.add_argument(
'''--version_2_with_negative''',
action='''store_true''',
help='''If true, the SQuAD examples contain some that do not have an answer.''',
)
parser.add_argument(
'''--null_score_diff_threshold''',
type=float,
default=0.0,
help='''If null_score - best_non_null is greater than the threshold predict null.''',
)
parser.add_argument(
'''--max_seq_length''',
default=3_84,
type=int,
help=(
'''The maximum total input sequence length after WordPiece tokenization. Sequences '''
'''longer than this will be truncated, and sequences shorter than this will be padded.'''
),
)
parser.add_argument(
'''--doc_stride''',
default=1_28,
type=int,
help='''When splitting up a long document into chunks, how much stride to take between chunks.''',
)
parser.add_argument('''--per_device_eval_batch_size''', default=8, type=int, help='''Batch size per GPU/CPU for evaluation.''')
parser.add_argument(
'''--n_best_size''',
default=20,
type=int,
help='''The total number of n-best predictions to generate in the nbest_predictions.json output file.''',
)
parser.add_argument(
'''--max_answer_length''',
default=30,
type=int,
help=(
'''The maximum length of an answer that can be generated. This is needed because the start '''
'''and end predictions are not conditioned on one another.'''
),
)
parser.add_argument('''--seed''', type=int, default=42, help='''random seed for initialization''')
parser.add_argument(
'''--dataset_name''',
type=str,
default=None,
required=True,
help='''The name of the dataset to use (via the datasets library).''',
)
parser.add_argument(
'''--dataset_config_name''',
type=str,
default=None,
help='''The configuration name of the dataset to use (via the datasets library).''',
)
parser.add_argument(
    '''--preprocessing_num_workers''', type=int, default=4, help='''The number of processes to use for preprocessing.'''
)
parser.add_argument('''--overwrite_cache''', action='''store_true''', help='''Overwrite the cached training and evaluation sets''')
parser.add_argument(
'''--fp16''',
action='''store_true''',
help='''Whether to use 16-bit (mixed) precision instead of 32-bit''',
)
parser.add_argument(
'''--int8''',
action='''store_true''',
help='''Whether to use INT8''',
)
_lowerCamelCase = parser.parse_args()
if args.tokenizer_name:
_lowerCamelCase = AutoTokenizer.from_pretrained(args.tokenizer_name, use_fast=True)
else:
raise ValueError(
'''You are instantiating a new tokenizer from scratch. This is not supported by this script.'''
'''You can do it from another script, save it, and load it from here, using --tokenizer_name.'''
)
logger.info('''Training/evaluation parameters %s''', args)
_lowerCamelCase = args.per_device_eval_batch_size
_lowerCamelCase = (args.eval_batch_size, args.max_seq_length)
# TRT Engine properties
_lowerCamelCase = True
_lowerCamelCase = '''temp_engine/bert-fp32.engine'''
if args.fp16:
    _lowerCamelCase = '''temp_engine/bert-fp16.engine'''
if args.int8:
    _lowerCamelCase = '''temp_engine/bert-int8.engine'''
# import ONNX file
if not os.path.exists('''temp_engine'''):
os.makedirs('''temp_engine''')
_lowerCamelCase = 1 << (int)(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH)
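# Build a TensorRT engine from the ONNX graph: parse the model, set precision flags (FP16/INT8) from the CLI
# arguments, attach an optimization profile with a fixed input shape, and serialize the engine to disk so it
# can be deserialized below for inference.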
with trt.Builder(TRT_LOGGER) as builder, builder.create_network(EXPLICIT_BATCH) as network, trt.OnnxParser(
network, TRT_LOGGER
) as parser:
with open(args.onnx_model_path, '''rb''') as model:
if not parser.parse(model.read()):
for error in range(parser.num_errors):
print(parser.get_error(error))
# Query input names and shapes from parsed TensorRT network
_lowerCamelCase = [network.get_input(i) for i in range(network.num_inputs)]
_lowerCamelCase = [_input.name for _input in network_inputs] # ex: ["actual_input1"]
with builder.create_builder_config() as config:
        config.max_workspace_size = 1 << 50
        if STRICT_TYPES:
            config.set_flag(trt.BuilderFlag.STRICT_TYPES)
        if args.fp16:
            config.set_flag(trt.BuilderFlag.FP16)
        if args.int8:
            config.set_flag(trt.BuilderFlag.INT8)
_lowerCamelCase = builder.create_optimization_profile()
config.add_optimization_profile(profile)
for i in range(len(input_names)):
profile.set_shape(input_names[i], INPUT_SHAPE, INPUT_SHAPE, INPUT_SHAPE)
_lowerCamelCase = builder.build_engine(network, config)
# serialize_engine and store in file (can be directly loaded and deserialized):
with open(engine_name, '''wb''') as f:
f.write(engine.serialize())
def model_infer(inputs, context, d_inputs, h_output0, h_output1, d_output0, d_output1, stream):
    input_ids = np.asarray(inputs["input_ids"], dtype=np.int32)
    attention_mask = np.asarray(inputs["attention_mask"], dtype=np.int32)
    token_type_ids = np.asarray(inputs["token_type_ids"], dtype=np.int32)

    # Copy inputs to device
    cuda.memcpy_htod_async(d_inputs[0], input_ids.ravel(), stream)
    cuda.memcpy_htod_async(d_inputs[1], attention_mask.ravel(), stream)
    cuda.memcpy_htod_async(d_inputs[2], token_type_ids.ravel(), stream)
    # start time
    start_time = time.time()
    # Run inference
    context.execute_async(
        bindings=[int(d_inp) for d_inp in d_inputs] + [int(d_output0), int(d_output1)], stream_handle=stream.handle
    )
    # Transfer predictions back from GPU
    cuda.memcpy_dtoh_async(h_output0, d_output0, stream)
    cuda.memcpy_dtoh_async(h_output1, d_output1, stream)
    # Synchronize the stream and take time
    stream.synchronize()
    # end time
    end_time = time.time()
    infer_time = end_time - start_time
    outputs = (h_output0, h_output1)
    return outputs, infer_time
# Initialize the accelerator. We will let the accelerator handle device placement for us in this example.
_lowerCamelCase = Accelerator()
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''',
datefmt='''%m/%d/%Y %H:%M:%S''',
level=logging.INFO,
)
# Setup logging, we only want one process per machine to log things on the screen.
# accelerator.is_local_main_process is only True for one process per machine.
logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR)
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_info()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
# If passed along, set the training seed now.
if args.seed is not None:
set_seed(args.seed)
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
#
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
if args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
_lowerCamelCase = load_dataset(args.dataset_name, args.dataset_config_name)
else:
raise ValueError('''Evaluation requires a dataset name''')
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Preprocessing the datasets.
# Preprocessing is slightly different for training and evaluation.
_lowerCamelCase = raw_datasets['''validation'''].column_names
_lowerCamelCase = '''question''' if '''question''' in column_names else column_names[0]
_lowerCamelCase = '''context''' if '''context''' in column_names else column_names[1]
_lowerCamelCase = '''answers''' if '''answers''' in column_names else column_names[2]
# Padding side determines if we do (question|context) or (context|question).
_lowerCamelCase = tokenizer.padding_side == '''right'''
if args.max_seq_length > tokenizer.model_max_length:
logger.warning(
f'The max_seq_length passed ({args.max_seq_length}) is larger than the maximum length for the'
f'model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.'
)
_lowerCamelCase = min(args.max_seq_length, tokenizer.model_max_length)
def prepare_validation_features(examples):
'''simple docstring'''
__SCREAMING_SNAKE_CASE : str = [q.lstrip() for q in examples[question_column_name]]
# Tokenize our examples with truncation and maybe padding, but keep the overflows using a stride. This results
    # in one example possibly giving several features when a context is long, each of those features having a
# context that overlaps a bit the context of the previous feature.
__SCREAMING_SNAKE_CASE : Tuple = tokenizer(
examples[question_column_name if pad_on_right else context_column_name] , examples[context_column_name if pad_on_right else question_column_name] , truncation='''only_second''' if pad_on_right else '''only_first''' , max_length=_A , stride=args.doc_stride , return_overflowing_tokens=_A , return_offsets_mapping=_A , padding='''max_length''' , )
# Since one example might give us several features if it has a long context, we need a map from a feature to
# its corresponding example. This key gives us just that.
__SCREAMING_SNAKE_CASE : List[str] = tokenized_examples.pop('''overflow_to_sample_mapping''' )
# For evaluation, we will need to convert our predictions to substrings of the context, so we keep the
# corresponding example_id and we will store the offset mappings.
__SCREAMING_SNAKE_CASE : List[str] = []
for i in range(len(tokenized_examples['''input_ids'''] ) ):
# Grab the sequence corresponding to that example (to know what is the context and what is the question).
__SCREAMING_SNAKE_CASE : Optional[int] = tokenized_examples.sequence_ids(_A )
__SCREAMING_SNAKE_CASE : Union[str, Any] = 1 if pad_on_right else 0
# One example can give several spans, this is the index of the example containing this span of text.
__SCREAMING_SNAKE_CASE : str = sample_mapping[i]
tokenized_examples["example_id"].append(examples['''id'''][sample_index] )
# Set to None the offset_mapping that are not part of the context so it's easy to determine if a token
# position is part of the context or not.
__SCREAMING_SNAKE_CASE : Union[str, Any] = [
(o if sequence_ids[k] == context_index else None)
for k, o in enumerate(tokenized_examples['''offset_mapping'''][i] )
]
return tokenized_examples
_lowerCamelCase = raw_datasets['''validation''']
# Validation Feature Creation
_lowerCamelCase = eval_examples.map(
prepare_validation_features,
batched=True,
num_proc=args.preprocessing_num_workers,
remove_columns=column_names,
load_from_cache_file=not args.overwrite_cache,
desc='''Running tokenizer on validation dataset''',
)
_lowerCamelCase = default_data_collator
_lowerCamelCase = eval_dataset.remove_columns(['''example_id''', '''offset_mapping'''])
_lowerCamelCase = DataLoader(
eval_dataset_for_model, collate_fn=data_collator, batch_size=args.per_device_eval_batch_size
)
def post_processing_function(examples, features, predictions, stage="eval"):
'''simple docstring'''
__SCREAMING_SNAKE_CASE : Tuple = postprocess_qa_predictions(
examples=_A , features=_A , predictions=_A , version_2_with_negative=args.version_2_with_negative , n_best_size=args.n_best_size , max_answer_length=args.max_answer_length , null_score_diff_threshold=args.null_score_diff_threshold , output_dir=args.output_dir , prefix=_A , )
# Format the result to the format the metric expects.
if args.version_2_with_negative:
__SCREAMING_SNAKE_CASE : Dict = [
{'''id''': k, '''prediction_text''': v, '''no_answer_probability''': 0.0} for k, v in predictions.items()
]
else:
__SCREAMING_SNAKE_CASE : Dict = [{'''id''': k, '''prediction_text''': v} for k, v in predictions.items()]
__SCREAMING_SNAKE_CASE : Optional[Any] = [{'''id''': ex['''id'''], '''answers''': ex[answer_column_name]} for ex in examples]
return EvalPrediction(predictions=_A , label_ids=_A )
_lowerCamelCase = load_metric('''squad_v2''' if args.version_2_with_negative else '''squad''')
# Evaluation!
logger.info('''Loading ONNX model %s for evaluation''', args.onnx_model_path)
with open(engine_name, '''rb''') as f, trt.Runtime(TRT_LOGGER) as runtime, runtime.deserialize_cuda_engine(
f.read()
) as engine, engine.create_execution_context() as context:
    # setup for TRT inference
for i in range(len(input_names)):
context.set_binding_shape(i, INPUT_SHAPE)
assert context.all_binding_shapes_specified
    def binding_nbytes(binding):
        """Return the number of bytes needed for the buffer of one engine binding."""
        return trt.volume(engine.get_binding_shape(binding)) * engine.get_binding_dtype(binding).itemsize
# Allocate device memory for inputs and outputs.
_lowerCamelCase = [cuda.mem_alloc(binding_nbytes(binding)) for binding in engine if engine.binding_is_input(binding)]
# Allocate output buffer
    _lowerCamelCase = cuda.pagelocked_empty(tuple(context.get_binding_shape(3)), dtype=np.float32)
    _lowerCamelCase = cuda.pagelocked_empty(tuple(context.get_binding_shape(4)), dtype=np.float32)
_lowerCamelCase = cuda.mem_alloc(h_outputa.nbytes)
_lowerCamelCase = cuda.mem_alloc(h_outputa.nbytes)
# Create a stream in which to copy inputs/outputs and run inference.
_lowerCamelCase = cuda.Stream()
# Evaluation
logger.info('''***** Running Evaluation *****''')
logger.info(f' Num examples = {len(eval_dataset)}')
logger.info(f' Batch size = {args.per_device_eval_batch_size}')
_lowerCamelCase = 0.0
_lowerCamelCase = 0
_lowerCamelCase = timeit.default_timer()
_lowerCamelCase = None
for step, batch in enumerate(eval_dataloader):
_lowerCamelCase = model_infer(batch, context, d_inputs, h_outputa, h_outputa, d_outputa, d_outputa, stream)
total_time += infer_time
niter += 1
_lowerCamelCase = outputs
_lowerCamelCase = torch.tensor(start_logits)
_lowerCamelCase = torch.tensor(end_logits)
# necessary to pad predictions and labels for being gathered
_lowerCamelCase = accelerator.pad_across_processes(start_logits, dim=1, pad_index=-1_00)
_lowerCamelCase = accelerator.pad_across_processes(end_logits, dim=1, pad_index=-1_00)
_lowerCamelCase = (accelerator.gather(start_logits).cpu().numpy(), accelerator.gather(end_logits).cpu().numpy())
_lowerCamelCase = logits if all_preds is None else nested_concat(all_preds, logits, padding_index=-1_00)
if all_preds is not None:
_lowerCamelCase = nested_truncate(all_preds, len(eval_dataset))
_lowerCamelCase = timeit.default_timer() - start_time
logger.info(''' Evaluation done in total %f secs (%f sec per example)''', evalTime, evalTime / len(eval_dataset))
# Inference time from TRT
logger.info('''Average Inference Time = {:.3f} ms'''.format(total_time * 10_00 / niter))
logger.info('''Total Inference Time = {:.3f} ms'''.format(total_time * 10_00))
logger.info('''Total Number of Inference = %d''', niter)
_lowerCamelCase = post_processing_function(eval_examples, eval_dataset, all_preds)
_lowerCamelCase = metric.compute(predictions=prediction.predictions, references=prediction.label_ids)
logger.info(f'Evaluation metrics: {eval_metric}')
| 719
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
_lowerCamelCase = logging.get_logger(__name__)
_lowerCamelCase = {
'''shi-labs/nat-mini-in1k-224''': '''https://huggingface.co/shi-labs/nat-mini-in1k-224/resolve/main/config.json''',
# See all Nat models at https://huggingface.co/models?filter=nat
}
class snake_case ( __UpperCAmelCase , __UpperCAmelCase ):
lowerCamelCase__ = '''nat'''
lowerCamelCase__ = {
'''num_attention_heads''': '''num_heads''',
'''num_hidden_layers''': '''num_layers''',
}
def __init__( self :Any , _lowerCamelCase :int=4 , _lowerCamelCase :List[str]=3 , _lowerCamelCase :Optional[int]=6_4 , _lowerCamelCase :Optional[Any]=[3, 4, 6, 5] , _lowerCamelCase :Optional[int]=[2, 4, 8, 1_6] , _lowerCamelCase :str=7 , _lowerCamelCase :int=3.0 , _lowerCamelCase :Optional[Any]=True , _lowerCamelCase :List[str]=0.0 , _lowerCamelCase :str=0.0 , _lowerCamelCase :int=0.1 , _lowerCamelCase :int="gelu" , _lowerCamelCase :Dict=0.0_2 , _lowerCamelCase :str=1e-5 , _lowerCamelCase :List[Any]=0.0 , _lowerCamelCase :Optional[Any]=None , _lowerCamelCase :Dict=None , **_lowerCamelCase :Union[str, Any] , ):
super().__init__(**_lowerCamelCase )
__SCREAMING_SNAKE_CASE : Union[str, Any] = patch_size
__SCREAMING_SNAKE_CASE : int = num_channels
__SCREAMING_SNAKE_CASE : List[str] = embed_dim
__SCREAMING_SNAKE_CASE : List[str] = depths
__SCREAMING_SNAKE_CASE : Union[str, Any] = len(_lowerCamelCase )
__SCREAMING_SNAKE_CASE : str = num_heads
__SCREAMING_SNAKE_CASE : Any = kernel_size
__SCREAMING_SNAKE_CASE : Tuple = mlp_ratio
__SCREAMING_SNAKE_CASE : Union[str, Any] = qkv_bias
__SCREAMING_SNAKE_CASE : Dict = hidden_dropout_prob
__SCREAMING_SNAKE_CASE : Tuple = attention_probs_dropout_prob
__SCREAMING_SNAKE_CASE : Any = drop_path_rate
__SCREAMING_SNAKE_CASE : Dict = hidden_act
__SCREAMING_SNAKE_CASE : List[Any] = layer_norm_eps
__SCREAMING_SNAKE_CASE : Optional[Any] = initializer_range
# we set the hidden_size attribute in order to make Nat work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
__SCREAMING_SNAKE_CASE : List[Any] = int(embed_dim * 2 ** (len(_lowerCamelCase ) - 1) )
__SCREAMING_SNAKE_CASE : Any = layer_scale_init_value
__SCREAMING_SNAKE_CASE : Tuple = ['''stem'''] + [f'''stage{idx}''' for idx in range(1 , len(_lowerCamelCase ) + 1 )]
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : Optional[Any] = get_aligned_output_features_output_indices(
out_features=_lowerCamelCase , out_indices=_lowerCamelCase , stage_names=self.stage_names )
| 401
| 0
|
import logging
import os
import random
import sys
from dataclasses import dataclass, field
from typing import Optional
import datasets
import evaluate
import numpy as np
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
AutoModelForSequenceClassification,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('4.31.0')
require_version('datasets>=1.8.0', 'To fix: pip install -r examples/pytorch/text-classification/requirements.txt')
_lowerCAmelCase : int = logging.getLogger(__name__)
@dataclass
class lowerCAmelCase :
'''simple docstring'''
snake_case = field(
default=128 , metadata={
'help': (
'The maximum total input sequence length after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
)
} , )
snake_case = field(
default=__UpperCamelCase , metadata={'help': 'Overwrite the cached preprocessed datasets or not.'} )
snake_case = field(
default=__UpperCamelCase , metadata={
'help': (
'Whether to pad all samples to `max_seq_length`. '
'If False, will pad the samples dynamically when batching to the maximum length in the batch.'
)
} , )
snake_case = field(
default=__UpperCamelCase , metadata={
'help': (
'For debugging purposes or quicker training, truncate the number of training examples to this '
'value if set.'
)
} , )
snake_case = field(
default=__UpperCamelCase , metadata={
'help': (
'For debugging purposes or quicker training, truncate the number of evaluation examples to this '
'value if set.'
)
} , )
snake_case = field(
default=__UpperCamelCase , metadata={
'help': (
'For debugging purposes or quicker training, truncate the number of prediction examples to this '
'value if set.'
)
} , )
@dataclass
class lowerCAmelCase :
'''simple docstring'''
snake_case = field(
default=__UpperCamelCase , metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'} )
snake_case = field(
default=__UpperCamelCase , metadata={'help': 'Evaluation language. Also train language if `train_language` is set to None.'} )
snake_case = field(
default=__UpperCamelCase , metadata={'help': 'Train language if it is different from the evaluation language.'} )
snake_case = field(
default=__UpperCamelCase , metadata={'help': 'Pretrained config name or path if not the same as model_name'} )
snake_case = field(
default=__UpperCamelCase , metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'} )
snake_case = field(
default=__UpperCamelCase , metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'} , )
snake_case = field(
default=__UpperCamelCase , metadata={'help': 'arg to indicate if tokenizer should do lower case in AutoTokenizer.from_pretrained()'} , )
snake_case = field(
default=__UpperCamelCase , metadata={'help': 'Whether to use one of the fast tokenizer (backed by the tokenizers library) or not.'} , )
snake_case = field(
default='main' , metadata={'help': 'The specific model version to use (can be a branch name, tag name or commit id).'} , )
snake_case = field(
default=__UpperCamelCase , metadata={
'help': (
'Will use the token generated when running `huggingface-cli login` (necessary to use this script '
'with private models).'
)
} , )
snake_case = field(
default=__UpperCamelCase , metadata={'help': 'Will enable to load a pretrained model whose head dimensions are different.'} , )
def a_ ( ) -> Tuple:
"""simple docstring"""
lowerCamelCase = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
lowerCamelCase , lowerCamelCase , lowerCamelCase = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry('run_xnli' , UpperCamelCase_ )
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
lowerCamelCase = training_args.get_process_log_level()
logger.setLevel(UpperCamelCase_ )
datasets.utils.logging.set_verbosity(UpperCamelCase_ )
transformers.utils.logging.set_verbosity(UpperCamelCase_ )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
f'''Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}'''
        + f'''distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fp16}''' )
logger.info(f'''Training/evaluation parameters {training_args}''' )
# Detecting last checkpoint.
lowerCamelCase = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
lowerCamelCase = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
f'''Output directory ({training_args.output_dir}) already exists and is not empty. '''
'Use --overwrite_output_dir to overcome.' )
elif last_checkpoint is not None:
logger.info(
f'''Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '''
'the `--output_dir` or add `--overwrite_output_dir` to train from scratch.' )
# Set seed before initializing model.
set_seed(training_args.seed )
# In distributed training, the load_dataset function guarantees that only one local process can concurrently
# download the dataset.
# Downloading and loading xnli dataset from the hub.
if training_args.do_train:
if model_args.train_language is None:
lowerCamelCase = load_dataset(
'xnli' , model_args.language , split='train' , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
else:
lowerCamelCase = load_dataset(
'xnli' , model_args.train_language , split='train' , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
lowerCamelCase = train_dataset.features['label'].names
if training_args.do_eval:
lowerCamelCase = load_dataset(
'xnli' , model_args.language , split='validation' , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
lowerCamelCase = eval_dataset.features['label'].names
if training_args.do_predict:
lowerCamelCase = load_dataset(
'xnli' , model_args.language , split='test' , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
lowerCamelCase = predict_dataset.features['label'].names
# Labels
lowerCamelCase = len(UpperCamelCase_ )
# Load pretrained model and tokenizer
# In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
lowerCamelCase = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=UpperCamelCase_ , idalabel={str(UpperCamelCase_ ): label for i, label in enumerate(UpperCamelCase_ )} , labelaid={label: i for i, label in enumerate(UpperCamelCase_ )} , finetuning_task='xnli' , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
lowerCamelCase = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , do_lower_case=model_args.do_lower_case , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast_tokenizer , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
lowerCamelCase = AutoModelForSequenceClassification.from_pretrained(
model_args.model_name_or_path , from_tf=bool('.ckpt' in model_args.model_name_or_path ) , config=UpperCamelCase_ , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ignore_mismatched_sizes=model_args.ignore_mismatched_sizes , )
# Preprocessing the datasets
# Padding strategy
if data_args.pad_to_max_length:
lowerCamelCase = 'max_length'
else:
# We will pad later, dynamically at batch creation, to the max sequence length in each batch
lowerCamelCase = False
def preprocess_function(UpperCamelCase_ : List[Any] ):
# Tokenize the texts
return tokenizer(
examples['premise'] , examples['hypothesis'] , padding=UpperCamelCase_ , max_length=data_args.max_seq_length , truncation=UpperCamelCase_ , )
if training_args.do_train:
if data_args.max_train_samples is not None:
lowerCamelCase = min(len(UpperCamelCase_ ) , data_args.max_train_samples )
lowerCamelCase = train_dataset.select(range(UpperCamelCase_ ) )
with training_args.main_process_first(desc='train dataset map pre-processing' ):
lowerCamelCase = train_dataset.map(
UpperCamelCase_ , batched=UpperCamelCase_ , load_from_cache_file=not data_args.overwrite_cache , desc='Running tokenizer on train dataset' , )
# Log a few random samples from the training set:
for index in random.sample(range(len(UpperCamelCase_ ) ) , 3 ):
logger.info(f'''Sample {index} of the training set: {train_dataset[index]}.''' )
if training_args.do_eval:
if data_args.max_eval_samples is not None:
lowerCamelCase = min(len(UpperCamelCase_ ) , data_args.max_eval_samples )
lowerCamelCase = eval_dataset.select(range(UpperCamelCase_ ) )
with training_args.main_process_first(desc='validation dataset map pre-processing' ):
lowerCamelCase = eval_dataset.map(
UpperCamelCase_ , batched=UpperCamelCase_ , load_from_cache_file=not data_args.overwrite_cache , desc='Running tokenizer on validation dataset' , )
if training_args.do_predict:
if data_args.max_predict_samples is not None:
lowerCamelCase = min(len(UpperCamelCase_ ) , data_args.max_predict_samples )
lowerCamelCase = predict_dataset.select(range(UpperCamelCase_ ) )
with training_args.main_process_first(desc='prediction dataset map pre-processing' ):
lowerCamelCase = predict_dataset.map(
UpperCamelCase_ , batched=UpperCamelCase_ , load_from_cache_file=not data_args.overwrite_cache , desc='Running tokenizer on prediction dataset' , )
# Get the metric function
lowerCamelCase = evaluate.load('xnli' )
# You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
# predictions and label_ids field) and has to return a dictionary string to float.
def compute_metrics(UpperCamelCase_ : EvalPrediction ):
lowerCamelCase = p.predictions[0] if isinstance(p.predictions , UpperCamelCase_ ) else p.predictions
lowerCamelCase = np.argmax(UpperCamelCase_ , axis=1 )
return metric.compute(predictions=UpperCamelCase_ , references=p.label_ids )
# Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding.
if data_args.pad_to_max_length:
lowerCamelCase = default_data_collator
    elif training_args.fp16:
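        # Padding to a multiple of 8 keeps sequence lengths friendly to NVIDIA tensor cores when fp16 is enabled.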
lowerCamelCase = DataCollatorWithPadding(UpperCamelCase_ , pad_to_multiple_of=8 )
else:
lowerCamelCase = None
# Initialize our Trainer
lowerCamelCase = Trainer(
model=UpperCamelCase_ , args=UpperCamelCase_ , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , compute_metrics=UpperCamelCase_ , tokenizer=UpperCamelCase_ , data_collator=UpperCamelCase_ , )
# Training
if training_args.do_train:
lowerCamelCase = None
if training_args.resume_from_checkpoint is not None:
lowerCamelCase = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
lowerCamelCase = last_checkpoint
lowerCamelCase = trainer.train(resume_from_checkpoint=UpperCamelCase_ )
lowerCamelCase = train_result.metrics
lowerCamelCase = (
data_args.max_train_samples if data_args.max_train_samples is not None else len(UpperCamelCase_ )
)
lowerCamelCase = min(UpperCamelCase_ , len(UpperCamelCase_ ) )
trainer.save_model() # Saves the tokenizer too for easy upload
trainer.log_metrics('train' , UpperCamelCase_ )
trainer.save_metrics('train' , UpperCamelCase_ )
trainer.save_state()
# Evaluation
if training_args.do_eval:
logger.info('*** Evaluate ***' )
lowerCamelCase = trainer.evaluate(eval_dataset=UpperCamelCase_ )
lowerCamelCase = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(UpperCamelCase_ )
lowerCamelCase = min(UpperCamelCase_ , len(UpperCamelCase_ ) )
trainer.log_metrics('eval' , UpperCamelCase_ )
trainer.save_metrics('eval' , UpperCamelCase_ )
# Prediction
if training_args.do_predict:
logger.info('*** Predict ***' )
lowerCamelCase , lowerCamelCase , lowerCamelCase = trainer.predict(UpperCamelCase_ , metric_key_prefix='predict' )
lowerCamelCase = (
data_args.max_predict_samples if data_args.max_predict_samples is not None else len(UpperCamelCase_ )
)
lowerCamelCase = min(UpperCamelCase_ , len(UpperCamelCase_ ) )
trainer.log_metrics('predict' , UpperCamelCase_ )
trainer.save_metrics('predict' , UpperCamelCase_ )
lowerCamelCase = np.argmax(UpperCamelCase_ , axis=1 )
lowerCamelCase = os.path.join(training_args.output_dir , 'predictions.txt' )
if trainer.is_world_process_zero():
with open(UpperCamelCase_ , 'w' ) as writer:
writer.write('index\tprediction\n' )
for index, item in enumerate(UpperCamelCase_ ):
lowerCamelCase = label_list[item]
writer.write(f'''{index}\t{item}\n''' )
if __name__ == "__main__":
main()
| 246
|
import argparse
import json
from pathlib import Path
import torch
import torchaudio
from datasets import load_dataset
from huggingface_hub import hf_hub_download
from transformers import ASTConfig, ASTFeatureExtractor, ASTForAudioClassification
from transformers.utils import logging
logging.set_verbosity_info()
_lowerCAmelCase : Dict = logging.get_logger(__name__)
def a_ ( UpperCamelCase_ : List[Any] ) -> str:
"""simple docstring"""
lowerCamelCase = ASTConfig()
if "10-10" in model_name:
pass
elif "speech-commands" in model_name:
lowerCamelCase = 1_2_8
elif "12-12" in model_name:
lowerCamelCase = 1_2
lowerCamelCase = 1_2
elif "14-14" in model_name:
lowerCamelCase = 1_4
lowerCamelCase = 1_4
elif "16-16" in model_name:
lowerCamelCase = 1_6
lowerCamelCase = 1_6
else:
raise ValueError('Model not supported' )
lowerCamelCase = 'huggingface/label-files'
if "speech-commands" in model_name:
lowerCamelCase = 3_5
lowerCamelCase = 'speech-commands-v2-id2label.json'
else:
lowerCamelCase = 5_2_7
lowerCamelCase = 'audioset-id2label.json'
lowerCamelCase = json.load(open(hf_hub_download(UpperCamelCase_ , UpperCamelCase_ , repo_type='dataset' ) , 'r' ) )
lowerCamelCase = {int(UpperCamelCase_ ): v for k, v in idalabel.items()}
lowerCamelCase = idalabel
lowerCamelCase = {v: k for k, v in idalabel.items()}
return config
def a_ ( UpperCamelCase_ : Optional[Any] ) -> Tuple:
"""simple docstring"""
if "module.v" in name:
lowerCamelCase = name.replace('module.v' , 'audio_spectrogram_transformer' )
if "cls_token" in name:
lowerCamelCase = name.replace('cls_token' , 'embeddings.cls_token' )
if "dist_token" in name:
lowerCamelCase = name.replace('dist_token' , 'embeddings.distillation_token' )
if "pos_embed" in name:
lowerCamelCase = name.replace('pos_embed' , 'embeddings.position_embeddings' )
if "patch_embed.proj" in name:
lowerCamelCase = name.replace('patch_embed.proj' , 'embeddings.patch_embeddings.projection' )
# transformer blocks
if "blocks" in name:
lowerCamelCase = name.replace('blocks' , 'encoder.layer' )
if "attn.proj" in name:
lowerCamelCase = name.replace('attn.proj' , 'attention.output.dense' )
if "attn" in name:
lowerCamelCase = name.replace('attn' , 'attention.self' )
if "norm1" in name:
lowerCamelCase = name.replace('norm1' , 'layernorm_before' )
if "norm2" in name:
lowerCamelCase = name.replace('norm2' , 'layernorm_after' )
if "mlp.fc1" in name:
lowerCamelCase = name.replace('mlp.fc1' , 'intermediate.dense' )
if "mlp.fc2" in name:
lowerCamelCase = name.replace('mlp.fc2' , 'output.dense' )
# final layernorm
if "audio_spectrogram_transformer.norm" in name:
lowerCamelCase = name.replace('audio_spectrogram_transformer.norm' , 'audio_spectrogram_transformer.layernorm' )
# classifier head
if "module.mlp_head.0" in name:
lowerCamelCase = name.replace('module.mlp_head.0' , 'classifier.layernorm' )
if "module.mlp_head.1" in name:
lowerCamelCase = name.replace('module.mlp_head.1' , 'classifier.dense' )
return name
def a_ ( UpperCamelCase_ : List[str] , UpperCamelCase_ : Dict ) -> Tuple:
"""simple docstring"""
for key in orig_state_dict.copy().keys():
lowerCamelCase = orig_state_dict.pop(UpperCamelCase_ )
if "qkv" in key:
lowerCamelCase = key.split('.' )
lowerCamelCase = int(key_split[3] )
lowerCamelCase = config.hidden_size
if "weight" in key:
lowerCamelCase = val[:dim, :]
lowerCamelCase = val[dim : dim * 2, :]
lowerCamelCase = val[-dim:, :]
else:
lowerCamelCase = val[:dim]
lowerCamelCase = val[dim : dim * 2]
lowerCamelCase = val[-dim:]
else:
lowerCamelCase = val
return orig_state_dict
def a_ ( UpperCamelCase_ : Union[str, Any] ) -> Dict:
"""simple docstring"""
lowerCamelCase = [
'module.v.head.weight',
'module.v.head.bias',
'module.v.head_dist.weight',
'module.v.head_dist.bias',
]
for k in ignore_keys:
state_dict.pop(UpperCamelCase_ , UpperCamelCase_ )
@torch.no_grad()
def a_ ( UpperCamelCase_ : Dict , UpperCamelCase_ : Any , UpperCamelCase_ : int=False ) -> Optional[Any]:
"""simple docstring"""
lowerCamelCase = get_audio_spectrogram_transformer_config(UpperCamelCase_ )
lowerCamelCase = {
'ast-finetuned-audioset-10-10-0.4593': (
'https://www.dropbox.com/s/ca0b1v2nlxzyeb4/audioset_10_10_0.4593.pth?dl=1'
),
'ast-finetuned-audioset-10-10-0.450': (
'https://www.dropbox.com/s/1tv0hovue1bxupk/audioset_10_10_0.4495.pth?dl=1'
),
'ast-finetuned-audioset-10-10-0.448': (
'https://www.dropbox.com/s/6u5sikl4b9wo4u5/audioset_10_10_0.4483.pth?dl=1'
),
'ast-finetuned-audioset-10-10-0.448-v2': (
'https://www.dropbox.com/s/kt6i0v9fvfm1mbq/audioset_10_10_0.4475.pth?dl=1'
),
'ast-finetuned-audioset-12-12-0.447': (
'https://www.dropbox.com/s/snfhx3tizr4nuc8/audioset_12_12_0.4467.pth?dl=1'
),
'ast-finetuned-audioset-14-14-0.443': (
'https://www.dropbox.com/s/z18s6pemtnxm4k7/audioset_14_14_0.4431.pth?dl=1'
),
'ast-finetuned-audioset-16-16-0.442': (
'https://www.dropbox.com/s/mdsa4t1xmcimia6/audioset_16_16_0.4422.pth?dl=1'
),
'ast-finetuned-speech-commands-v2': (
'https://www.dropbox.com/s/q0tbqpwv44pquwy/speechcommands_10_10_0.9812.pth?dl=1'
),
}
# load original state_dict
lowerCamelCase = model_name_to_url[model_name]
lowerCamelCase = torch.hub.load_state_dict_from_url(UpperCamelCase_ , map_location='cpu' )
# remove some keys
remove_keys(UpperCamelCase_ )
# rename some keys
lowerCamelCase = convert_state_dict(UpperCamelCase_ , UpperCamelCase_ )
# load 🤗 model
lowerCamelCase = ASTForAudioClassification(UpperCamelCase_ )
model.eval()
model.load_state_dict(UpperCamelCase_ )
# verify outputs on dummy input
# source: https://github.com/YuanGongND/ast/blob/79e873b8a54d0a3b330dd522584ff2b9926cd581/src/run.py#L62
lowerCamelCase = -4.2_677_393 if 'speech-commands' not in model_name else -6.845_978
lowerCamelCase = 4.5_689_974 if 'speech-commands' not in model_name else 5.5_654_526
lowerCamelCase = 1_0_2_4 if 'speech-commands' not in model_name else 1_2_8
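    # Dataset-level mean/std of the log-mel filterbank features used by the original AST code for input
    # normalization (AudioSet stats, or the Speech Commands stats for that checkpoint).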
lowerCamelCase = ASTFeatureExtractor(mean=UpperCamelCase_ , std=UpperCamelCase_ , max_length=UpperCamelCase_ )
if "speech-commands" in model_name:
lowerCamelCase = load_dataset('speech_commands' , 'v0.02' , split='validation' )
lowerCamelCase = dataset[0]['audio']['array']
else:
lowerCamelCase = hf_hub_download(
repo_id='nielsr/audio-spectogram-transformer-checkpoint' , filename='sample_audio.flac' , repo_type='dataset' , )
lowerCamelCase , lowerCamelCase = torchaudio.load(UpperCamelCase_ )
lowerCamelCase = waveform.squeeze().numpy()
lowerCamelCase = feature_extractor(UpperCamelCase_ , sampling_rate=1_6_0_0_0 , return_tensors='pt' )
# forward pass
lowerCamelCase = model(**UpperCamelCase_ )
lowerCamelCase = outputs.logits
if model_name == "ast-finetuned-audioset-10-10-0.4593":
lowerCamelCase = torch.tensor([-0.8_760, -7.0_042, -8.6_602] )
elif model_name == "ast-finetuned-audioset-10-10-0.450":
lowerCamelCase = torch.tensor([-1.1_986, -7.0_903, -8.2_718] )
elif model_name == "ast-finetuned-audioset-10-10-0.448":
lowerCamelCase = torch.tensor([-2.6_128, -8.0_080, -9.4_344] )
elif model_name == "ast-finetuned-audioset-10-10-0.448-v2":
lowerCamelCase = torch.tensor([-1.5_080, -7.4_534, -8.8_917] )
elif model_name == "ast-finetuned-audioset-12-12-0.447":
lowerCamelCase = torch.tensor([-0.5_050, -6.5_833, -8.0_843] )
elif model_name == "ast-finetuned-audioset-14-14-0.443":
lowerCamelCase = torch.tensor([-0.3_826, -7.0_336, -8.2_413] )
elif model_name == "ast-finetuned-audioset-16-16-0.442":
lowerCamelCase = torch.tensor([-1.2_113, -6.9_101, -8.3_470] )
elif model_name == "ast-finetuned-speech-commands-v2":
lowerCamelCase = torch.tensor([6.1_589, -8.0_566, -8.7_984] )
else:
raise ValueError('Unknown model name' )
if not torch.allclose(logits[0, :3] , UpperCamelCase_ , atol=1E-4 ):
raise ValueError('Logits don\'t match' )
print('Looks ok!' )
if pytorch_dump_folder_path is not None:
Path(UpperCamelCase_ ).mkdir(exist_ok=UpperCamelCase_ )
print(f'''Saving model {model_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(UpperCamelCase_ )
print(f'''Saving feature extractor to {pytorch_dump_folder_path}''' )
feature_extractor.save_pretrained(UpperCamelCase_ )
if push_to_hub:
print('Pushing model and feature extractor to the hub...' )
model.push_to_hub(f'''MIT/{model_name}''' )
feature_extractor.push_to_hub(f'''MIT/{model_name}''' )
if __name__ == "__main__":
_lowerCAmelCase : Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='ast-finetuned-audioset-10-10-0.4593',
type=str,
help='Name of the Audio Spectrogram Transformer model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
_lowerCAmelCase : Tuple = parser.parse_args()
convert_audio_spectrogram_transformer_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 246
| 1
|
import argparse
import requests
import torch
# pip3 install salesforce-lavis
# I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis
from lavis.models import load_model_and_preprocess
from PIL import Image
from transformers import (
AutoTokenizer,
BlipaConfig,
BlipaForConditionalGeneration,
BlipaProcessor,
BlipaVisionConfig,
BlipImageProcessor,
OPTConfig,
TaConfig,
)
from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD
def a_ ( ):
UpperCAmelCase__ = 'https://storage.googleapis.com/sfr-vision-language-research/LAVIS/assets/merlion.png'
UpperCAmelCase__ = Image.open(requests.get(lowerCamelCase , stream=lowerCamelCase ).raw ).convert('RGB' )
return image
def a_ ( lowerCamelCase ):
UpperCAmelCase__ = []
# fmt: off
# vision encoder
rename_keys.append(('visual_encoder.cls_token', 'vision_model.embeddings.class_embedding') )
rename_keys.append(('visual_encoder.pos_embed', 'vision_model.embeddings.position_embedding') )
rename_keys.append(('visual_encoder.patch_embed.proj.weight', 'vision_model.embeddings.patch_embedding.weight') )
rename_keys.append(('visual_encoder.patch_embed.proj.bias', 'vision_model.embeddings.patch_embedding.bias') )
rename_keys.append(('ln_vision.weight', 'vision_model.post_layernorm.weight') )
rename_keys.append(('ln_vision.bias', 'vision_model.post_layernorm.bias') )
for i in range(config.vision_config.num_hidden_layers ):
rename_keys.append((f'''visual_encoder.blocks.{i}.norm1.weight''', f'''vision_model.encoder.layers.{i}.layer_norm1.weight''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.norm1.bias''', f'''vision_model.encoder.layers.{i}.layer_norm1.bias''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.norm2.weight''', f'''vision_model.encoder.layers.{i}.layer_norm2.weight''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.norm2.bias''', f'''vision_model.encoder.layers.{i}.layer_norm2.bias''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.attn.qkv.weight''', f'''vision_model.encoder.layers.{i}.self_attn.qkv.weight''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.attn.proj.weight''', f'''vision_model.encoder.layers.{i}.self_attn.projection.weight''',) )
rename_keys.append((f'''visual_encoder.blocks.{i}.attn.proj.bias''', f'''vision_model.encoder.layers.{i}.self_attn.projection.bias''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.mlp.fc1.weight''', f'''vision_model.encoder.layers.{i}.mlp.fc1.weight''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.mlp.fc1.bias''', f'''vision_model.encoder.layers.{i}.mlp.fc1.bias''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.mlp.fc2.weight''', f'''vision_model.encoder.layers.{i}.mlp.fc2.weight''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.mlp.fc2.bias''', f'''vision_model.encoder.layers.{i}.mlp.fc2.bias''') )
# QFormer
rename_keys.append(('Qformer.bert.embeddings.LayerNorm.weight', 'qformer.layernorm.weight') )
rename_keys.append(('Qformer.bert.embeddings.LayerNorm.bias', 'qformer.layernorm.bias') )
# fmt: on
return rename_keys
def a_ ( lowerCamelCase , lowerCamelCase , lowerCamelCase ):
UpperCAmelCase__ = dct.pop(lowerCamelCase )
UpperCAmelCase__ = val
def a_ ( lowerCamelCase , lowerCamelCase ):
for i in range(config.vision_config.num_hidden_layers ):
# read in original q and v biases
UpperCAmelCase__ = state_dict.pop(f'''visual_encoder.blocks.{i}.attn.q_bias''' )
UpperCAmelCase__ = state_dict.pop(f'''visual_encoder.blocks.{i}.attn.v_bias''' )
# next, set bias in the state dict
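        # BLIP-2's vision attention has no bias on the key projection, so a zero tensor is placed between the
        # q and v biases to build the fused qkv bias expected by the HF implementation.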
UpperCAmelCase__ = torch.cat((q_bias, torch.zeros_like(lowerCamelCase , requires_grad=lowerCamelCase ), v_bias) )
UpperCAmelCase__ = qkv_bias
def a_ ( lowerCamelCase , lowerCamelCase ):
UpperCAmelCase__ = 3_6_4 if 'coco' in model_name else 2_2_4
UpperCAmelCase__ = BlipaVisionConfig(image_size=lowerCamelCase ).to_dict()
# make sure the models have proper bos_token_id and eos_token_id set (important for generation)
# seems like flan-T5 models don't have bos_token_id properly set?
if "opt-2.7b" in model_name:
UpperCAmelCase__ = OPTConfig.from_pretrained('facebook/opt-2.7b' , eos_token_id=lowerCamelCase ).to_dict()
elif "opt-6.7b" in model_name:
UpperCAmelCase__ = OPTConfig.from_pretrained('facebook/opt-6.7b' , eos_token_id=lowerCamelCase ).to_dict()
elif "t5-xl" in model_name:
UpperCAmelCase__ = TaConfig.from_pretrained('google/flan-t5-xl' , dense_act_fn='gelu' , bos_token_id=1 ).to_dict()
elif "t5-xxl" in model_name:
UpperCAmelCase__ = TaConfig.from_pretrained('google/flan-t5-xxl' , dense_act_fn='gelu' , bos_token_id=1 ).to_dict()
UpperCAmelCase__ = BlipaConfig(vision_config=lowerCamelCase , text_config=lowerCamelCase )
return config, image_size
@torch.no_grad()
def a_ ( lowerCamelCase , lowerCamelCase=None , lowerCamelCase=False ):
UpperCAmelCase__ = (
AutoTokenizer.from_pretrained('facebook/opt-2.7b' )
if 'opt' in model_name
else AutoTokenizer.from_pretrained('google/flan-t5-xl' )
)
UpperCAmelCase__ = tokenizer('\n' , add_special_tokens=lowerCamelCase ).input_ids[0]
UpperCAmelCase__ , UpperCAmelCase__ = get_blipa_config(lowerCamelCase , eos_token_id=lowerCamelCase )
UpperCAmelCase__ = BlipaForConditionalGeneration(lowerCamelCase ).eval()
UpperCAmelCase__ = {
'blip2-opt-2.7b': ('blip2_opt', 'pretrain_opt2.7b'),
'blip2-opt-6.7b': ('blip2_opt', 'pretrain_opt6.7b'),
'blip2-opt-2.7b-coco': ('blip2_opt', 'caption_coco_opt2.7b'),
'blip2-opt-6.7b-coco': ('blip2_opt', 'caption_coco_opt6.7b'),
'blip2-flan-t5-xl': ('blip2_t5', 'pretrain_flant5xl'),
'blip2-flan-t5-xl-coco': ('blip2_t5', 'caption_coco_flant5xl'),
'blip2-flan-t5-xxl': ('blip2_t5', 'pretrain_flant5xxl'),
}
UpperCAmelCase__ , UpperCAmelCase__ = model_name_to_original[model_name]
# load original model
print('Loading original model...' )
UpperCAmelCase__ = 'cuda' if torch.cuda.is_available() else 'cpu'
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ = load_model_and_preprocess(
name=lowerCamelCase , model_type=lowerCamelCase , is_eval=lowerCamelCase , device=lowerCamelCase )
original_model.eval()
print('Done!' )
# update state dict keys
UpperCAmelCase__ = original_model.state_dict()
UpperCAmelCase__ = create_rename_keys(lowerCamelCase )
for src, dest in rename_keys:
rename_key(lowerCamelCase , lowerCamelCase , lowerCamelCase )
# the remaining keys can be renamed with simple string replacements
for key, val in state_dict.copy().items():
UpperCAmelCase__ = state_dict.pop(lowerCamelCase )
if key.startswith('Qformer.bert' ):
UpperCAmelCase__ = key.replace('Qformer.bert' , 'qformer' )
if "attention.self" in key:
UpperCAmelCase__ = key.replace('self' , 'attention' )
if "opt_proj" in key:
UpperCAmelCase__ = key.replace('opt_proj' , 'language_projection' )
if "t5_proj" in key:
UpperCAmelCase__ = key.replace('t5_proj' , 'language_projection' )
if key.startswith('opt' ):
UpperCAmelCase__ = key.replace('opt' , 'language' )
if key.startswith('t5' ):
UpperCAmelCase__ = key.replace('t5' , 'language' )
UpperCAmelCase__ = val
# read in qv biases
read_in_q_v_bias(lowerCamelCase , lowerCamelCase )
UpperCAmelCase__ , UpperCAmelCase__ = hf_model.load_state_dict(lowerCamelCase , strict=lowerCamelCase )
assert len(lowerCamelCase ) == 0
assert unexpected_keys == ["qformer.embeddings.position_ids"]
UpperCAmelCase__ = load_demo_image()
UpperCAmelCase__ = vis_processors['eval'](lowerCamelCase ).unsqueeze(0 ).to(lowerCamelCase )
UpperCAmelCase__ = tokenizer(['\n'] , return_tensors='pt' ).input_ids.to(lowerCamelCase )
# create processor
UpperCAmelCase__ = BlipImageProcessor(
size={'height': image_size, 'width': image_size} , image_mean=lowerCamelCase , image_std=lowerCamelCase )
UpperCAmelCase__ = BlipaProcessor(image_processor=lowerCamelCase , tokenizer=lowerCamelCase )
UpperCAmelCase__ = processor(images=lowerCamelCase , return_tensors='pt' ).pixel_values.to(lowerCamelCase )
# make sure processor creates exact same pixel values
assert torch.allclose(lowerCamelCase , lowerCamelCase )
original_model.to(lowerCamelCase )
hf_model.to(lowerCamelCase )
with torch.no_grad():
if "opt" in model_name:
UpperCAmelCase__ = original_model({'image': original_pixel_values, 'text_input': ['']} ).logits
UpperCAmelCase__ = hf_model(lowerCamelCase , lowerCamelCase ).logits
else:
UpperCAmelCase__ = original_model(
{'image': original_pixel_values, 'text_input': ['\n'], 'text_output': ['\n']} ).logits
UpperCAmelCase__ = input_ids.masked_fill(input_ids == tokenizer.pad_token_id , -1_0_0 )
UpperCAmelCase__ = hf_model(lowerCamelCase , lowerCamelCase , labels=lowerCamelCase ).logits
assert original_logits.shape == logits.shape
print('First values of original logits:' , original_logits[0, :3, :3] )
print('First values of HF logits:' , logits[0, :3, :3] )
# assert values
if model_name == "blip2-flan-t5-xl":
UpperCAmelCase__ = torch.tensor(
[[-41.5850, -4.4440, -8.9922], [-47.4322, -5.9143, -1.7340]] , device=lowerCamelCase )
assert torch.allclose(logits[0, :3, :3] , lowerCamelCase , atol=1e-4 )
elif model_name == "blip2-flan-t5-xl-coco":
UpperCAmelCase__ = torch.tensor(
[[-57.0109, -9.8967, -12.6280], [-68.6578, -12.7191, -10.5065]] , device=lowerCamelCase )
assert torch.allclose(logits[0, :3, :3] , lowerCamelCase , atol=1e-4 )
else:
# cast to same type
UpperCAmelCase__ = logits.dtype
assert torch.allclose(original_logits.to(lowerCamelCase ) , lowerCamelCase , atol=1e-2 )
print('Looks ok!' )
print('Generating a caption...' )
UpperCAmelCase__ = ''
UpperCAmelCase__ = tokenizer(lowerCamelCase , return_tensors='pt' ).input_ids.to(lowerCamelCase )
UpperCAmelCase__ = original_model.generate({'image': original_pixel_values} )
UpperCAmelCase__ = hf_model.generate(
lowerCamelCase , lowerCamelCase , do_sample=lowerCamelCase , num_beams=5 , max_length=3_0 , min_length=1 , top_p=0.9 , repetition_penalty=1.0 , length_penalty=1.0 , temperature=1 , )
print('Original generation:' , lowerCamelCase )
UpperCAmelCase__ = input_ids.shape[1]
UpperCAmelCase__ = processor.batch_decode(outputs[:, prompt_length:] , skip_special_tokens=lowerCamelCase )
UpperCAmelCase__ = [text.strip() for text in output_text]
print('HF generation:' , lowerCamelCase )
if pytorch_dump_folder_path is not None:
processor.save_pretrained(lowerCamelCase )
hf_model.save_pretrained(lowerCamelCase )
if push_to_hub:
processor.push_to_hub(f'''nielsr/{model_name}''' )
hf_model.push_to_hub(f'''nielsr/{model_name}''' )
if __name__ == "__main__":
lowerCAmelCase__ : Union[str, Any] = argparse.ArgumentParser()
lowerCAmelCase__ : List[Any] = [
'blip2-opt-2.7b',
'blip2-opt-6.7b',
'blip2-opt-2.7b-coco',
'blip2-opt-6.7b-coco',
'blip2-flan-t5-xl',
'blip2-flan-t5-xl-coco',
'blip2-flan-t5-xxl',
]
parser.add_argument(
'--model_name',
default='blip2-opt-2.7b',
choices=choices,
type=str,
help='Name of the BLIP-2 model to convert (must be one of the choices listed above)',
)
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument(
'--push_to_hub',
action='store_true',
help='Whether to push the model and processor to the hub after converting',
)
lowerCAmelCase__ : List[Any] = parser.parse_args()
convert_blipa_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 704
|
"""simple docstring"""
import socket
def a_ ( ):
UpperCAmelCase__ = socket.socket(socket.AF_INET , socket.SOCK_STREAM )
UpperCAmelCase__ = socket.gethostname()
UpperCAmelCase__ = 1_2_3_1_2
sock.connect((host, port) )
sock.send(b'Hello server!' )
with open('Received_file' , 'wb' ) as out_file:
print('File opened' )
print('Receiving data...' )
while True:
UpperCAmelCase__ = sock.recv(1_0_2_4 )
if not data:
break
out_file.write(lowerCamelCase )
print('Successfully received the file' )
sock.close()
print('Connection closed' )
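# For context, a minimal counterpart server is sketched below as comments (assumptions: it runs on
# the same host, listens on port 12312, and streams some local file after reading the greeting —
# none of this is part of the original script):
#
# import socket
# server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# server.bind((socket.gethostname(), 12312))
# server.listen(1)
# conn, _addr = server.accept()
# print(conn.recv(1024))  # b'Hello server!'
# with open('file_to_send', 'rb') as in_file:  # hypothetical file name
#     chunk = in_file.read(1024)
#     while chunk:
#         conn.send(chunk)
#         chunk = in_file.read(1024)
# conn.close()  # closing the connection makes the client's recv() return b'' and end its loop
# server.close()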
if __name__ == "__main__":
main()
| 632
| 0
|
"""simple docstring"""
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
A_ : Any = 16
A_ : List[str] = 32
def A ( snake_case__ , snake_case__ = 16 , snake_case__ = "bert-base-cased" ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = AutoTokenizer.from_pretrained(lowercase_ )
SCREAMING_SNAKE_CASE__ = load_dataset("""glue""" , """mrpc""" )
def tokenize_function(snake_case__ ):
# max_length=None => use the model max length (it's actually the default)
SCREAMING_SNAKE_CASE__ = tokenizer(examples["""sentence1"""] , examples["""sentence2"""] , truncation=lowercase_ , max_length=lowercase_ )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
SCREAMING_SNAKE_CASE__ = datasets.map(
lowercase_ , batched=lowercase_ , remove_columns=["""idx""", """sentence1""", """sentence2"""] , load_from_cache_file=lowercase_ )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
SCREAMING_SNAKE_CASE__ = tokenized_datasets.rename_column("""label""" , """labels""" )
def collate_fn(snake_case__ ):
# On TPU it's best to pad everything to the same length or training will be very slow.
if accelerator.distributed_type == DistributedType.TPU:
return tokenizer.pad(lowercase_ , padding="""max_length""" , max_length=1_28 , return_tensors="""pt""" )
return tokenizer.pad(lowercase_ , padding="""longest""" , return_tensors="""pt""" )
# Instantiate dataloaders.
SCREAMING_SNAKE_CASE__ = DataLoader(
tokenized_datasets["""train"""] , shuffle=lowercase_ , collate_fn=lowercase_ , batch_size=lowercase_ )
SCREAMING_SNAKE_CASE__ = DataLoader(
tokenized_datasets["""validation"""] , shuffle=lowercase_ , collate_fn=lowercase_ , batch_size=lowercase_ )
return train_dataloader, eval_dataloader
def A ( snake_case__ , snake_case__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = Accelerator()
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
SCREAMING_SNAKE_CASE__ = config["""lr"""]
SCREAMING_SNAKE_CASE__ = int(config["""num_epochs"""] )
SCREAMING_SNAKE_CASE__ = int(config["""seed"""] )
SCREAMING_SNAKE_CASE__ = int(config["""batch_size"""] )
SCREAMING_SNAKE_CASE__ = args.model_name_or_path
set_seed(lowercase_ )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = get_dataloaders(lowercase_ , lowercase_ , lowercase_ )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
SCREAMING_SNAKE_CASE__ = AutoModelForSequenceClassification.from_pretrained(lowercase_ , return_dict=lowercase_ )
# Instantiate optimizer
SCREAMING_SNAKE_CASE__ = (
AdamW
if accelerator.state.deepspeed_plugin is None
or """optimizer""" not in accelerator.state.deepspeed_plugin.deepspeed_config
else DummyOptim
)
SCREAMING_SNAKE_CASE__ = optimizer_cls(params=model.parameters() , lr=lowercase_ )
if accelerator.state.deepspeed_plugin is not None:
SCREAMING_SNAKE_CASE__ = accelerator.state.deepspeed_plugin.deepspeed_config[
"""gradient_accumulation_steps"""
]
else:
SCREAMING_SNAKE_CASE__ = 1
SCREAMING_SNAKE_CASE__ = (len(lowercase_ ) * num_epochs) // gradient_accumulation_steps
# Instantiate scheduler
if (
accelerator.state.deepspeed_plugin is None
or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
):
SCREAMING_SNAKE_CASE__ = get_linear_schedule_with_warmup(
optimizer=lowercase_ , num_warmup_steps=0 , num_training_steps=lowercase_ , )
else:
SCREAMING_SNAKE_CASE__ = DummyScheduler(lowercase_ , total_num_steps=lowercase_ , warmup_num_steps=0 )
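# Editorial note on the branching above: if the DeepSpeed config passed to Accelerate already
# declares an "optimizer" / "scheduler" section, DeepSpeed instantiates them itself, and Accelerate
# expects the DummyOptim / DummyScheduler placeholders here instead of real objects. An illustrative
# (assumed, not shipped with this script) ds_config.json fragment that triggers the Dummy path:
#
# {
#     "gradient_accumulation_steps": 1,
#     "optimizer": {"type": "AdamW", "params": {"lr": 2e-5}},
#     "scheduler": {"type": "WarmupLR", "params": {"warmup_num_steps": 0}}
# }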
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = accelerator.prepare(
lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ )
# We need to keep track of how many total steps we have iterated over
SCREAMING_SNAKE_CASE__ = 0
# We also need to keep track of the starting epoch so files are named properly
SCREAMING_SNAKE_CASE__ = 0
# Now we train the model
SCREAMING_SNAKE_CASE__ = evaluate.load("""glue""" , """mrpc""" )
SCREAMING_SNAKE_CASE__ = 0
SCREAMING_SNAKE_CASE__ = {}
for epoch in range(lowercase_ , lowercase_ ):
model.train()
for step, batch in enumerate(lowercase_ ):
SCREAMING_SNAKE_CASE__ = model(**lowercase_ )
SCREAMING_SNAKE_CASE__ = outputs.loss
SCREAMING_SNAKE_CASE__ = loss / gradient_accumulation_steps
accelerator.backward(lowercase_ )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
overall_step += 1
model.eval()
SCREAMING_SNAKE_CASE__ = 0
for step, batch in enumerate(lowercase_ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
SCREAMING_SNAKE_CASE__ = model(**lowercase_ )
SCREAMING_SNAKE_CASE__ = outputs.logits.argmax(dim=-1 )
# It is slightly faster to call this once, than multiple times
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = accelerator.gather(
(predictions, batch["""labels"""]) ) # If we are in a multiprocess environment, the last batch has duplicates
if accelerator.use_distributed:
if step == len(lowercase_ ) - 1:
SCREAMING_SNAKE_CASE__ = predictions[: len(eval_dataloader.dataset ) - samples_seen]
SCREAMING_SNAKE_CASE__ = references[: len(eval_dataloader.dataset ) - samples_seen]
else:
samples_seen += references.shape[0]
metric.add_batch(
predictions=lowercase_ , references=lowercase_ , )
SCREAMING_SNAKE_CASE__ = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(f"""epoch {epoch}:""" , lowercase_ )
SCREAMING_SNAKE_CASE__ = eval_metric["""accuracy"""]
if best_performance < eval_metric["accuracy"]:
SCREAMING_SNAKE_CASE__ = eval_metric["""accuracy"""]
if args.performance_lower_bound is not None:
assert (
args.performance_lower_bound <= best_performance
), f"""Best performance metric {best_performance} is lower than the lower bound {args.performance_lower_bound}"""
accelerator.wait_for_everyone()
if accelerator.is_main_process:
with open(os.path.join(args.output_dir , """all_results.json""" ) , """w""" ) as f:
json.dump(lowercase_ , lowercase_ )
def A ( ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = argparse.ArgumentParser(description="""Simple example of a training script that checks model performance against an optional lower bound.""" )
parser.add_argument(
"""--model_name_or_path""" , type=lowercase_ , default="""bert-base-cased""" , help="""Path to pretrained model or model identifier from huggingface.co/models.""" , required=lowercase_ , )
parser.add_argument(
"""--output_dir""" , type=lowercase_ , default=""".""" , help="""Optional save directory where all checkpoint folders will be stored. Default is the current working directory.""" , )
parser.add_argument(
"""--performance_lower_bound""" , type=lowercase_ , default=lowercase_ , help="""Optional lower bound for the performance metric. If set, the training will throw error when the performance metric drops below this value.""" , )
parser.add_argument(
"""--num_epochs""" , type=lowercase_ , default=3 , help="""Number of train epochs.""" , )
SCREAMING_SNAKE_CASE__ = parser.parse_args()
SCREAMING_SNAKE_CASE__ = {"""lr""": 2e-5, """num_epochs""": args.num_epochs, """seed""": 42, """batch_size""": 16}
training_function(lowercase_ , lowercase_ )
if __name__ == "__main__":
main()
| 196
|
'''simple docstring'''
import tempfile
import unittest
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer
from transformers.testing_utils import (
is_torch_available,
require_optimum,
require_torch,
slow,
)
if is_torch_available():
import torch
@require_torch
@require_optimum
@slow
class __UpperCamelCase (unittest.TestCase ):
def _a ( self ) -> List[str]:
'''simple docstring'''
lowercase = """hf-internal-testing/tiny-random-t5"""
lowercase = AutoTokenizer.from_pretrained(_lowerCAmelCase )
lowercase = AutoModelForSeqaSeqLM.from_pretrained(_lowerCAmelCase )
lowercase = tokenizer("""This is me""" , return_tensors="""pt""" )
lowercase = model.to_bettertransformer()
self.assertTrue(any("""BetterTransformer""" in mod.__class__.__name__ for _, mod in model.named_modules() ) )
lowercase = model.generate(**_lowerCAmelCase )
lowercase = model.reverse_bettertransformer()
self.assertFalse(any("""BetterTransformer""" in mod.__class__.__name__ for _, mod in model.named_modules() ) )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(_lowerCAmelCase )
lowercase = AutoModelForSeqaSeqLM.from_pretrained(_lowerCAmelCase )
self.assertFalse(
any("""BetterTransformer""" in mod.__class__.__name__ for _, mod in model_reloaded.named_modules() ) )
lowercase = model_reloaded.generate(**_lowerCAmelCase )
self.assertTrue(torch.allclose(_lowerCAmelCase , _lowerCAmelCase ) )
def _a ( self ) -> str:
'''simple docstring'''
lowercase = """hf-internal-testing/tiny-random-t5"""
lowercase = AutoModelForSeqaSeqLM.from_pretrained(_lowerCAmelCase )
lowercase = model.to_bettertransformer()
with tempfile.TemporaryDirectory() as tmpdirname:
with self.assertRaises(_lowerCAmelCase ):
model.save_pretrained(_lowerCAmelCase )
lowercase = model.reverse_bettertransformer()
model.save_pretrained(_lowerCAmelCase )
| 588
| 0
|
import json
import os
import torch
from diffusers import UNetaDModel
os.makedirs('''hub/hopper-medium-v2/unet/hor32''', exist_ok=True)
os.makedirs('''hub/hopper-medium-v2/unet/hor128''', exist_ok=True)
os.makedirs('''hub/hopper-medium-v2/value_function''', exist_ok=True)
def __lowerCamelCase (UpperCAmelCase__ : int ):
if hor == 1_2_8:
SCREAMING_SNAKE_CASE = ("DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D")
SCREAMING_SNAKE_CASE = (3_2, 1_2_8, 2_5_6)
SCREAMING_SNAKE_CASE = ("UpResnetBlock1D", "UpResnetBlock1D")
elif hor == 3_2:
SCREAMING_SNAKE_CASE = ("DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D")
SCREAMING_SNAKE_CASE = (3_2, 6_4, 1_2_8, 2_5_6)
SCREAMING_SNAKE_CASE = ("UpResnetBlock1D", "UpResnetBlock1D", "UpResnetBlock1D")
SCREAMING_SNAKE_CASE = torch.load(F"/Users/bglickenhaus/Documents/diffuser/temporal_unet-hopper-mediumv2-hor{hor}.torch" )
SCREAMING_SNAKE_CASE = model.state_dict()
SCREAMING_SNAKE_CASE = {
"down_block_types": down_block_types,
"block_out_channels": block_out_channels,
"up_block_types": up_block_types,
"layers_per_block": 1,
"use_timestep_embedding": True,
"out_block_type": "OutConv1DBlock",
"norm_num_groups": 8,
"downsample_each_block": False,
"in_channels": 1_4,
"out_channels": 1_4,
"extra_in_channels": 0,
"time_embedding_type": "positional",
"flip_sin_to_cos": False,
"freq_shift": 1,
"sample_size": 6_5_5_3_6,
"mid_block_type": "MidResTemporalBlock1D",
"act_fn": "mish",
}
SCREAMING_SNAKE_CASE = UNetaDModel(**UpperCAmelCase__ )
print(F"length of state dict: {len(state_dict.keys() )}" )
print(F"length of value function dict: {len(hf_value_function.state_dict().keys() )}" )
SCREAMING_SNAKE_CASE = dict(zip(model.state_dict().keys() , hf_value_function.state_dict().keys() ) )
for k, v in mapping.items():
SCREAMING_SNAKE_CASE = state_dict.pop(UpperCAmelCase__ )
hf_value_function.load_state_dict(UpperCAmelCase__ )
torch.save(hf_value_function.state_dict() , F"hub/hopper-medium-v2/unet/hor{hor}/diffusion_pytorch_model.bin" )
with open(F"hub/hopper-medium-v2/unet/hor{hor}/config.json" , "w" ) as f:
json.dump(UpperCAmelCase__ , UpperCAmelCase__ )
def __lowerCamelCase ():
SCREAMING_SNAKE_CASE = {
"in_channels": 1_4,
"down_block_types": ("DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D"),
"up_block_types": (),
"out_block_type": "ValueFunction",
"mid_block_type": "ValueFunctionMidBlock1D",
"block_out_channels": (3_2, 6_4, 1_2_8, 2_5_6),
"layers_per_block": 1,
"downsample_each_block": True,
"sample_size": 6_5_5_3_6,
"out_channels": 1_4,
"extra_in_channels": 0,
"time_embedding_type": "positional",
"use_timestep_embedding": True,
"flip_sin_to_cos": False,
"freq_shift": 1,
"norm_num_groups": 8,
"act_fn": "mish",
}
SCREAMING_SNAKE_CASE = torch.load("/Users/bglickenhaus/Documents/diffuser/value_function-hopper-mediumv2-hor32.torch" )
SCREAMING_SNAKE_CASE = model
SCREAMING_SNAKE_CASE = UNetaDModel(**UpperCAmelCase__ )
print(F"length of state dict: {len(state_dict.keys() )}" )
print(F"length of value function dict: {len(hf_value_function.state_dict().keys() )}" )
SCREAMING_SNAKE_CASE = dict(zip(state_dict.keys() , hf_value_function.state_dict().keys() ) )
for k, v in mapping.items():
SCREAMING_SNAKE_CASE = state_dict.pop(UpperCAmelCase__ )
hf_value_function.load_state_dict(UpperCAmelCase__ )
torch.save(hf_value_function.state_dict() , "hub/hopper-medium-v2/value_function/diffusion_pytorch_model.bin" )
with open("hub/hopper-medium-v2/value_function/config.json" , "w" ) as f:
json.dump(UpperCAmelCase__ , UpperCAmelCase__ )
if __name__ == "__main__":
unet(32)
# unet(128)
value_function()
| 647
|
import functools
def __lowerCamelCase (UpperCAmelCase__ : list[int] , UpperCAmelCase__ : list[int] ):
# Validation
if not isinstance(UpperCAmelCase__ , UpperCAmelCase__ ) or not all(isinstance(UpperCAmelCase__ , UpperCAmelCase__ ) for day in days ):
raise ValueError("The parameter days should be a list of integers" )
if len(UpperCAmelCase__ ) != 3 or not all(isinstance(UpperCAmelCase__ , UpperCAmelCase__ ) for cost in costs ):
raise ValueError("The parameter costs should be a list of three integers" )
if len(UpperCAmelCase__ ) == 0:
return 0
if min(UpperCAmelCase__ ) <= 0:
raise ValueError("All days elements should be greater than 0" )
if max(UpperCAmelCase__ ) >= 3_6_6:
raise ValueError("All days elements should be less than 366" )
SCREAMING_SNAKE_CASE = set(UpperCAmelCase__ )
@functools.cache
def dynamic_programming(UpperCAmelCase__ : int ) -> int:
if index > 3_6_5:
return 0
if index not in days_set:
return dynamic_programming(index + 1 )
return min(
costs[0] + dynamic_programming(index + 1 ) , costs[1] + dynamic_programming(index + 7 ) , costs[2] + dynamic_programming(index + 3_0 ) , )
return dynamic_programming(1 )
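# Worked example (not executed here): days = [1, 4, 6, 7, 8, 20] with costs = [2, 7, 15]
# -> a 1-day pass on day 1 (2), a 7-day pass covering days 4-10 (7) and a 1-day pass on day 20 (2)
# give the minimum total cost of 2 + 7 + 2 = 11.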
if __name__ == "__main__":
import doctest
doctest.testmod()
| 647
| 1
|
from __future__ import annotations
import math
def a ( lowerCamelCase_ ):
'''simple docstring'''
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
# All primes greater than 3 are of the form 6k +/- 1
for i in range(5 , int(math.sqrt(lowerCamelCase_ ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
def a ( lowerCamelCase_ ):
'''simple docstring'''
lowercase__ = str(lowerCamelCase_ )
lowercase__ = [n]
for i in range(1 , len(lowerCamelCase_ ) ):
list_nums.append(int(str_num[i:] ) )
list_nums.append(int(str_num[:-i] ) )
return list_nums
def a ( lowerCamelCase_ ):
'''simple docstring'''
if len(str(lowerCamelCase_ ) ) > 3:
if not is_prime(int(str(lowerCamelCase_ )[-3:] ) ) or not is_prime(int(str(lowerCamelCase_ )[:3] ) ):
return False
return True
def a ( lowerCamelCase_ = 11 ):
'''simple docstring'''
lowercase__ = []
lowercase__ = 13
while len(lowerCamelCase_ ) != count:
if validate(lowerCamelCase_ ):
lowercase__ = list_truncated_nums(lowerCamelCase_ )
if all(is_prime(lowerCamelCase_ ) for i in list_nums ):
list_truncated_primes.append(lowerCamelCase_ )
num += 2
return list_truncated_primes
def a ( ):
'''simple docstring'''
return sum(compute_truncated_primes(11 ) )
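# For reference, the eleven primes truncatable from both left and right are
# 23, 37, 53, 73, 313, 317, 373, 797, 3137, 3797 and 739397, so the expected answer is 748317.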
if __name__ == "__main__":
print(F"{sum(compute_truncated_primes(11)) = }")
| 183
|
from jiwer import compute_measures
import datasets
A__ : Tuple = '\\n@inproceedings{inproceedings,\n author = {Morris, Andrew and Maier, Viktoria and Green, Phil},\n year = {2004},\n month = {01},\n pages = {},\n title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}\n}\n'
A__ : Optional[int] = '\\nWord error rate (WER) is a common metric of the performance of an automatic speech recognition system.\n\nThe general difficulty of measuring performance lies in the fact that the recognized word sequence can have a different length from the reference word sequence (supposedly the correct one). The WER is derived from the Levenshtein distance, working at the word level instead of the phoneme level. The WER is a valuable tool for comparing different systems as well as for evaluating improvements within one system. This kind of measurement, however, provides no details on the nature of translation errors and further work is therefore required to identify the main source(s) of error and to focus any research effort.\n\nThis problem is solved by first aligning the recognized word sequence with the reference (spoken) word sequence using dynamic string alignment. Examination of this issue is seen through a theory called the power law that states the correlation between perplexity and word error rate.\n\nWord error rate can then be computed as:\n\nWER = (S + D + I) / N = (S + D + I) / (S + D + C)\n\nwhere\n\nS is the number of substitutions,\nD is the number of deletions,\nI is the number of insertions,\nC is the number of correct words,\nN is the number of words in the reference (N=S+D+C).\n\nThis value indicates the average number of errors per reference word. The lower the value, the better the\nperformance of the ASR system with a WER of 0 being a perfect score.\n'
A__ : Dict = '\nCompute WER score of transcribed segments against references.\n\nArgs:\n references: List of references for each speech input.\n predictions: List of transcriptions to score.\n concatenate_texts (bool, default=False): Whether to concatenate all input texts or compute WER iteratively.\n\nReturns:\n (float): the word error rate\n\nExamples:\n\n >>> predictions = ["this is the prediction", "there is an other sample"]\n >>> references = ["this is the reference", "there is another one"]\n >>> wer = datasets.load_metric("wer")\n >>> wer_score = wer.compute(predictions=predictions, references=references)\n >>> print(wer_score)\n 0.5\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION )
class _UpperCAmelCase ( datasets.Metric ):
"""simple docstring"""
def lowercase__ ( self : Union[str, Any] ):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION, citation=_CITATION, inputs_description=_KWARGS_DESCRIPTION, features=datasets.Features(
{
'''predictions''': datasets.Value('''string''', id='''sequence''' ),
'''references''': datasets.Value('''string''', id='''sequence''' ),
} ), codebase_urls=['''https://github.com/jitsi/jiwer/'''], reference_urls=[
'''https://en.wikipedia.org/wiki/Word_error_rate''',
], )
def lowercase__ ( self : Union[str, Any], lowerCamelCase : str=None, lowerCamelCase : Union[str, Any]=None, lowerCamelCase : List[str]=False ):
'''simple docstring'''
if concatenate_texts:
return compute_measures(lowerCamelCase, lowerCamelCase )["wer"]
else:
lowercase__ = 0
lowercase__ = 0
for prediction, reference in zip(lowerCamelCase, lowerCamelCase ):
lowercase__ = compute_measures(lowerCamelCase, lowerCamelCase )
incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
total += measures["substitutions"] + measures["deletions"] + measures["hits"]
return incorrect / total
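# Worked example matching the docstring above (word-level alignment as computed by jiwer):
# ref "this is the reference" vs pred "this is the prediction" -> S=1, D=0, I=0, hits=3
# ref "there is another one" vs pred "there is an other sample" -> S=2, D=0, I=1, hits=2
# incorrect = (1+0+0) + (2+0+1) = 4, total = (1+0+3) + (2+0+2) = 8, so WER = 4 / 8 = 0.5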
| 183
| 1
|
"""simple docstring"""
from __future__ import annotations
from typing import Any
def lowerCamelCase_ ( _lowerCamelCase ):
create_state_space_tree(_lowerCamelCase , [] , 0 )
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
if index == len(_lowerCamelCase ):
print(_lowerCamelCase )
return
create_state_space_tree(_lowerCamelCase , _lowerCamelCase , index + 1 )
current_subsequence.append(sequence[index] )
create_state_space_tree(_lowerCamelCase , _lowerCamelCase , index + 1 )
current_subsequence.pop()
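# Illustrative trace: the exclude-branch is explored before the include-branch, so for the
# sequence [1, 2] the printed order is [], [2], [1], [1, 2]; for [3, 1, 2, 4] below, all
# 2**4 = 16 subsequences are printed.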
if __name__ == "__main__":
A_ : list[Any] = [3, 1, 2, 4]
generate_all_subsequences(seq)
seq.clear()
seq.extend(["A", "B", "C"])
generate_all_subsequences(seq)
| 701
|
"""simple docstring"""
def lowerCamelCase_ ( _lowerCamelCase ):
lowerCamelCase__ : Union[str, Any] = 1
for i in range(1 , num + 1 ):
fact *= i
return fact
def lowerCamelCase_ ( _lowerCamelCase ):
lowerCamelCase__ : Optional[Any] = 0
while number > 0:
lowerCamelCase__ : List[str] = number % 10
sum_of_digits += last_digit
lowerCamelCase__ : str = number // 10 # Removing the last_digit from the given number
return sum_of_digits
def lowerCamelCase_ ( _lowerCamelCase = 100 ):
lowerCamelCase__ : Union[str, Any] = factorial(_lowerCamelCase )
lowerCamelCase__ : List[Any] = split_and_add(_lowerCamelCase )
return result
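# Worked example: factorial(10) = 3628800 and split_and_add(3628800) = 3+6+2+8+8+0+0 = 27.
# For the default input of 100 (Project Euler problem 20) the digit sum of 100! is 648.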
if __name__ == "__main__":
print(solution(int(input("Enter the Number: ").strip())))
| 696
| 0
|
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
IMAGE_PROCESSOR_MAPPING,
AutoConfig,
AutoImageProcessor,
CLIPConfig,
CLIPImageProcessor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER
sys.path.append(str(Path(__file__).parent.parent.parent.parent / '''utils'''))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_image_processing import CustomImageProcessor # noqa E402
class UpperCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
def __lowercase ( self ) -> List[Any]:
_a : int = 0
def __lowercase ( self ) -> List[str]:
_a : Dict = AutoImageProcessor.from_pretrained('''openai/clip-vit-base-patch32''' )
self.assertIsInstance(_a , _a )
def __lowercase ( self ) -> Union[str, Any]:
with tempfile.TemporaryDirectory() as tmpdirname:
_a : Tuple = Path(_a ) / '''preprocessor_config.json'''
_a : Optional[Any] = Path(_a ) / '''config.json'''
json.dump(
{'''image_processor_type''': '''CLIPImageProcessor''', '''processor_class''': '''CLIPProcessor'''} , open(_a , '''w''' ) , )
json.dump({'''model_type''': '''clip'''} , open(_a , '''w''' ) )
_a : List[str] = AutoImageProcessor.from_pretrained(_a )
self.assertIsInstance(_a , _a )
def __lowercase ( self ) -> Optional[Any]:
# Ensure we can load the image processor from the feature extractor config
with tempfile.TemporaryDirectory() as tmpdirname:
_a : Optional[int] = Path(_a ) / '''preprocessor_config.json'''
_a : Any = Path(_a ) / '''config.json'''
json.dump(
{'''feature_extractor_type''': '''CLIPFeatureExtractor''', '''processor_class''': '''CLIPProcessor'''} , open(_a , '''w''' ) , )
json.dump({'''model_type''': '''clip'''} , open(_a , '''w''' ) )
_a : Optional[Any] = AutoImageProcessor.from_pretrained(_a )
self.assertIsInstance(_a , _a )
def __lowercase ( self ) -> Optional[Any]:
with tempfile.TemporaryDirectory() as tmpdirname:
_a : Dict = CLIPConfig()
# Create a dummy config file with image_processor_type
_a : Tuple = Path(_a ) / '''preprocessor_config.json'''
_a : List[str] = Path(_a ) / '''config.json'''
json.dump(
{'''image_processor_type''': '''CLIPImageProcessor''', '''processor_class''': '''CLIPProcessor'''} , open(_a , '''w''' ) , )
json.dump({'''model_type''': '''clip'''} , open(_a , '''w''' ) )
# remove image_processor_type to make sure config.json alone is enough to load image processor locally
_a : Tuple = AutoImageProcessor.from_pretrained(_a ).to_dict()
config_dict.pop('''image_processor_type''' )
_a : Tuple = CLIPImageProcessor(**_a )
# save in new folder
model_config.save_pretrained(_a )
config.save_pretrained(_a )
_a : List[str] = AutoImageProcessor.from_pretrained(_a )
# make sure private variable is not incorrectly saved
_a : Optional[int] = json.loads(config.to_json_string() )
self.assertTrue('''_processor_class''' not in dict_as_saved )
self.assertIsInstance(_a , _a )
def __lowercase ( self ) -> Dict:
with tempfile.TemporaryDirectory() as tmpdirname:
_a : Optional[int] = Path(_a ) / '''preprocessor_config.json'''
json.dump(
{'''image_processor_type''': '''CLIPImageProcessor''', '''processor_class''': '''CLIPProcessor'''} , open(_a , '''w''' ) , )
_a : List[str] = AutoImageProcessor.from_pretrained(_a )
self.assertIsInstance(_a , _a )
def __lowercase ( self ) -> Any:
with self.assertRaisesRegex(
_a , '''clip-base is not a local folder and is not a valid model identifier''' ):
_a : Dict = AutoImageProcessor.from_pretrained('''clip-base''' )
def __lowercase ( self ) -> List[Any]:
with self.assertRaisesRegex(
_a , R'''aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)''' ):
_a : List[str] = AutoImageProcessor.from_pretrained(_a , revision='''aaaaaa''' )
def __lowercase ( self ) -> Dict:
with self.assertRaisesRegex(
_a , '''hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.''' , ):
_a : Optional[int] = AutoImageProcessor.from_pretrained('''hf-internal-testing/config-no-model''' )
def __lowercase ( self ) -> Union[str, Any]:
# If remote code is not set, we will time out when asking whether to load the model.
with self.assertRaises(_a ):
_a : str = AutoImageProcessor.from_pretrained('''hf-internal-testing/test_dynamic_image_processor''' )
# If remote code is disabled, we can't load this config.
with self.assertRaises(_a ):
_a : Optional[Any] = AutoImageProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_image_processor''' , trust_remote_code=_a )
_a : Union[str, Any] = AutoImageProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_image_processor''' , trust_remote_code=_a )
self.assertEqual(image_processor.__class__.__name__ , '''NewImageProcessor''' )
# Test image processor can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(_a )
_a : Optional[Any] = AutoImageProcessor.from_pretrained(_a , trust_remote_code=_a )
self.assertEqual(reloaded_image_processor.__class__.__name__ , '''NewImageProcessor''' )
def __lowercase ( self ) -> Dict:
try:
AutoConfig.register('''custom''' , _a )
AutoImageProcessor.register(_a , _a )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(_a ):
AutoImageProcessor.register(_a , _a )
with tempfile.TemporaryDirectory() as tmpdirname:
_a : int = Path(_a ) / '''preprocessor_config.json'''
_a : int = Path(_a ) / '''config.json'''
json.dump(
{'''feature_extractor_type''': '''CLIPFeatureExtractor''', '''processor_class''': '''CLIPProcessor'''} , open(_a , '''w''' ) , )
json.dump({'''model_type''': '''clip'''} , open(_a , '''w''' ) )
_a : int = CustomImageProcessor.from_pretrained(_a )
# Now that the config is registered, it can be used as any other config with the auto-API
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(_a )
_a : Optional[Any] = AutoImageProcessor.from_pretrained(_a )
self.assertIsInstance(_a , _a )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
def __lowercase ( self ) -> Union[str, Any]:
class UpperCAmelCase_ ( __lowercase ):
"""simple docstring"""
UpperCAmelCase__ : Tuple = True
try:
AutoConfig.register('''custom''' , _a )
AutoImageProcessor.register(_a , _a )
# If remote code is not set, the default is to use local
_a : str = AutoImageProcessor.from_pretrained('''hf-internal-testing/test_dynamic_image_processor''' )
self.assertEqual(image_processor.__class__.__name__ , '''NewImageProcessor''' )
self.assertTrue(image_processor.is_local )
# If remote code is disabled, we load the local one.
_a : int = AutoImageProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_image_processor''' , trust_remote_code=_a )
self.assertEqual(image_processor.__class__.__name__ , '''NewImageProcessor''' )
self.assertTrue(image_processor.is_local )
# If remote is enabled, we load from the Hub
_a : Dict = AutoImageProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_image_processor''' , trust_remote_code=_a )
self.assertEqual(image_processor.__class__.__name__ , '''NewImageProcessor''' )
self.assertTrue(not hasattr(_a , '''is_local''' ) )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
| 14
|
'''simple docstring'''
from typing import List, Optional, Tuple, Union
import torch
from ...schedulers import DDIMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class __UpperCamelCase ( lowerCamelCase__ ):
def __init__( self, lowerCAmelCase, lowerCAmelCase ):
"""simple docstring"""
super().__init__()
# make sure scheduler can always be converted to DDIM
lowerCamelCase_ =DDIMScheduler.from_config(scheduler.config )
self.register_modules(unet=lowerCAmelCase, scheduler=lowerCAmelCase )
@torch.no_grad()
def __call__( self, lowerCAmelCase = 1, lowerCAmelCase = None, lowerCAmelCase = 0.0, lowerCAmelCase = 50, lowerCAmelCase = None, lowerCAmelCase = "pil", lowerCAmelCase = True, ):
"""simple docstring"""
if isinstance(self.unet.config.sample_size, lowerCAmelCase ):
lowerCamelCase_ =(
batch_size,
self.unet.config.in_channels,
self.unet.config.sample_size,
self.unet.config.sample_size,
)
else:
lowerCamelCase_ =(batch_size, self.unet.config.in_channels, *self.unet.config.sample_size)
if isinstance(lowerCAmelCase, lowerCAmelCase ) and len(lowerCAmelCase ) != batch_size:
raise ValueError(
f'''You have passed a list of generators of length {len(lowerCAmelCase )}, but requested an effective batch'''
f''' size of {batch_size}. Make sure the batch size matches the length of the generators.''' )
lowerCamelCase_ =randn_tensor(lowerCAmelCase, generator=lowerCAmelCase, device=self.device, dtype=self.unet.dtype )
# set step values
self.scheduler.set_timesteps(lowerCAmelCase )
for t in self.progress_bar(self.scheduler.timesteps ):
# 1. predict noise model_output
lowerCamelCase_ =self.unet(lowerCAmelCase, lowerCAmelCase ).sample
# 2. predict previous mean of image x_t-1 and add variance depending on eta
# eta corresponds to η in paper and should be between [0, 1]
# do x_t -> x_t-1
lowerCamelCase_ =self.scheduler.step(
lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, eta=lowerCAmelCase, use_clipped_model_output=lowerCAmelCase, generator=lowerCAmelCase ).prev_sample
# rescale the sample from [-1, 1] to [0, 1] before converting it to an image
lowerCamelCase_ =(image / 2 + 0.5).clamp(0, 1 )
# move to CPU and reorder to (batch, height, width, channels) for numpy / PIL conversion
lowerCamelCase_ =image.cpu().permute(0, 2, 3, 1 ).numpy()
if output_type == "pil":
lowerCamelCase_ =self.numpy_to_pil(lowerCAmelCase )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=lowerCAmelCase )
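# Usage sketch (editorial): this class mirrors diffusers' DDIMPipeline, so an equivalent call with
# a public checkpoint (an assumption, not part of this file) would look roughly like:
#
# from diffusers import DDIMPipeline
# pipe = DDIMPipeline.from_pretrained("google/ddpm-cifar10-32")
# image = pipe(batch_size=1, num_inference_steps=50, eta=0.0).images[0]
# image.save("ddim_sample.png")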
| 676
| 0
|
"""simple docstring"""
import itertools
import json
import os
import unittest
from transformers import AddedToken, LongformerTokenizer, LongformerTokenizerFast
from transformers.models.longformer.tokenization_longformer import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class _a ( SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
a_ : Optional[int] = LongformerTokenizer
a_ : Union[str, Any] = True
a_ : Tuple = LongformerTokenizerFast
a_ : Any = True
def _UpperCamelCase ( self : Dict ):
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
lowerCamelCase__ = [
'l',
'o',
'w',
'e',
'r',
's',
't',
'i',
'd',
'n',
'\u0120',
'\u0120l',
'\u0120n',
'\u0120lo',
'\u0120low',
'er',
'\u0120lowest',
'\u0120newer',
'\u0120wider',
'<unk>',
]
lowerCamelCase__ = dict(zip(SCREAMING_SNAKE_CASE__ , range(len(SCREAMING_SNAKE_CASE__ ) ) ) )
lowerCamelCase__ = ['#version: 0.2', '\u0120 l', '\u0120l o', '\u0120lo w', 'e r', '']
lowerCamelCase__ = {'unk_token': '<unk>'}
lowerCamelCase__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
lowerCamelCase__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp:
fp.write(json.dumps(SCREAMING_SNAKE_CASE__ ) + '\n' )
with open(self.merges_file , 'w' , encoding='utf-8' ) as fp:
fp.write('\n'.join(SCREAMING_SNAKE_CASE__ ) )
def _UpperCamelCase ( self : Optional[Any] , **SCREAMING_SNAKE_CASE__ : Tuple ):
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self : int , **SCREAMING_SNAKE_CASE__ : Any ):
kwargs.update(self.special_tokens_map )
return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self : int , SCREAMING_SNAKE_CASE__ : str ):
lowerCamelCase__ = 'lower newer'
lowerCamelCase__ = 'lower newer'
return input_text, output_text
def _UpperCamelCase ( self : Optional[int] ):
lowerCamelCase__ = self.tokenizer_class(self.vocab_file , self.merges_file , **self.special_tokens_map )
lowerCamelCase__ = 'lower newer'
lowerCamelCase__ = ['l', 'o', 'w', 'er', '\u0120', 'n', 'e', 'w', 'er']
lowerCamelCase__ = tokenizer.tokenize(SCREAMING_SNAKE_CASE__ ) # , add_prefix_space=True)
self.assertListEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = tokens + [tokenizer.unk_token]
lowerCamelCase__ = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE__ ) , SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self : str ):
lowerCamelCase__ = self.get_tokenizer()
self.assertListEqual(tokenizer.encode('Hello world!' , add_special_tokens=SCREAMING_SNAKE_CASE__ ) , [0, 3_14_14, 2_32, 3_28, 2] )
self.assertListEqual(
tokenizer.encode('Hello world! cécé herlolip 418' , add_special_tokens=SCREAMING_SNAKE_CASE__ ) , [0, 3_14_14, 2_32, 3_28, 7_40, 11_40, 1_26_95, 69, 4_60_78, 15_88, 2] , )
@slow
def _UpperCamelCase ( self : Union[str, Any] ):
lowerCamelCase__ = self.tokenizer_class.from_pretrained('allenai/longformer-base-4096' )
lowerCamelCase__ = tokenizer.encode('sequence builders' , add_special_tokens=SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = tokenizer.encode('multi-sequence build' , add_special_tokens=SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = tokenizer.encode(
'sequence builders' , add_special_tokens=SCREAMING_SNAKE_CASE__ , add_prefix_space=SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = tokenizer.encode(
'sequence builders' , 'multi-sequence build' , add_special_tokens=SCREAMING_SNAKE_CASE__ , add_prefix_space=SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = tokenizer.build_inputs_with_special_tokens(SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = tokenizer.build_inputs_with_special_tokens(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
assert encoded_sentence == encoded_text_from_decode
assert encoded_pair == encoded_pair_from_decode
def _UpperCamelCase ( self : Optional[int] ):
lowerCamelCase__ = self.get_tokenizer()
lowerCamelCase__ = 'Encode this sequence.'
lowerCamelCase__ = tokenizer.byte_encoder[' '.encode('utf-8' )[0]]
# Testing encoder arguments
lowerCamelCase__ = tokenizer.encode(SCREAMING_SNAKE_CASE__ , add_special_tokens=SCREAMING_SNAKE_CASE__ , add_prefix_space=SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertNotEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = tokenizer.encode(SCREAMING_SNAKE_CASE__ , add_special_tokens=SCREAMING_SNAKE_CASE__ , add_prefix_space=SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
tokenizer.add_special_tokens({'bos_token': '<s>'} )
lowerCamelCase__ = tokenizer.encode(SCREAMING_SNAKE_CASE__ , add_special_tokens=SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = tokenizer.convert_ids_to_tokens(encoded[1] )[0]
self.assertNotEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
# Testing spaces after special tokens
lowerCamelCase__ = '<mask>'
tokenizer.add_special_tokens(
{'mask_token': AddedToken(SCREAMING_SNAKE_CASE__ , lstrip=SCREAMING_SNAKE_CASE__ , rstrip=SCREAMING_SNAKE_CASE__ )} ) # mask token has a left space
lowerCamelCase__ = tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = 'Encode <mask> sequence'
lowerCamelCase__ = 'Encode <mask>sequence'
lowerCamelCase__ = tokenizer.encode(SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = encoded.index(SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = tokenizer.encode(SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = encoded.index(SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertNotEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self : Optional[int] ):
pass
def _UpperCamelCase ( self : List[Any] ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'{tokenizer.__class__.__name__} ({pretrained_name})' ):
lowerCamelCase__ = self.rust_tokenizer_class.from_pretrained(SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = self.tokenizer_class.from_pretrained(SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = 'A, <mask> AllenNLP sentence.'
lowerCamelCase__ = tokenizer_r.encode_plus(SCREAMING_SNAKE_CASE__ , add_special_tokens=SCREAMING_SNAKE_CASE__ , return_token_type_ids=SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = tokenizer_p.encode_plus(SCREAMING_SNAKE_CASE__ , add_special_tokens=SCREAMING_SNAKE_CASE__ , return_token_type_ids=SCREAMING_SNAKE_CASE__ )
# token_type_ids should put 0 everywhere
self.assertEqual(sum(tokens_r['token_type_ids'] ) , sum(tokens_p['token_type_ids'] ) )
# attention_mask should put 1 everywhere, so sum over length should be 1
self.assertEqual(
sum(tokens_r['attention_mask'] ) / len(tokens_r['attention_mask'] ) , sum(tokens_p['attention_mask'] ) / len(tokens_p['attention_mask'] ) , )
lowerCamelCase__ = tokenizer_r.convert_ids_to_tokens(tokens_r['input_ids'] )
lowerCamelCase__ = tokenizer_p.convert_ids_to_tokens(tokens_p['input_ids'] )
# Rust correctly handles the space before the mask while Python doesn't
self.assertSequenceEqual(tokens_p['input_ids'] , [0, 2_50, 6, 5_02_64, 38_23, 4_87, 2_19_92, 36_45, 4, 2] )
self.assertSequenceEqual(tokens_r['input_ids'] , [0, 2_50, 6, 5_02_64, 38_23, 4_87, 2_19_92, 36_45, 4, 2] )
self.assertSequenceEqual(
SCREAMING_SNAKE_CASE__ , ['<s>', 'A', ',', '<mask>', 'ĠAllen', 'N', 'LP', 'Ġsentence', '.', '</s>'] )
self.assertSequenceEqual(
SCREAMING_SNAKE_CASE__ , ['<s>', 'A', ',', '<mask>', 'ĠAllen', 'N', 'LP', 'Ġsentence', '.', '</s>'] )
def _UpperCamelCase ( self : Optional[Any] ):
for trim_offsets, add_prefix_space in itertools.product([True, False] , repeat=2 ):
lowerCamelCase__ = self.rust_tokenizer_class.from_pretrained(
self.tmpdirname , use_fast=SCREAMING_SNAKE_CASE__ , add_prefix_space=SCREAMING_SNAKE_CASE__ , trim_offsets=SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__() )
lowerCamelCase__ = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__() )
self.assertEqual(pre_tokenizer_state['add_prefix_space'] , SCREAMING_SNAKE_CASE__ )
self.assertEqual(post_processor_state['add_prefix_space'] , SCREAMING_SNAKE_CASE__ )
self.assertEqual(post_processor_state['trim_offsets'] , SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self : Union[str, Any] ):
# Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space` and
# `trim_offsets`
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'{tokenizer.__class__.__name__} ({pretrained_name})' ):
lowerCamelCase__ = 'hello' # `hello` is a token in the vocabulary of `pretrained_name`
lowerCamelCase__ = F'{text_of_1_token} {text_of_1_token}'
lowerCamelCase__ = self.rust_tokenizer_class.from_pretrained(
SCREAMING_SNAKE_CASE__ , use_fast=SCREAMING_SNAKE_CASE__ , add_prefix_space=SCREAMING_SNAKE_CASE__ , trim_offsets=SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = tokenizer_r(SCREAMING_SNAKE_CASE__ , return_offsets_mapping=SCREAMING_SNAKE_CASE__ , add_special_tokens=SCREAMING_SNAKE_CASE__ )
self.assertEqual(encoding.offset_mapping[0] , (0, len(SCREAMING_SNAKE_CASE__ )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(SCREAMING_SNAKE_CASE__ ) + 1, len(SCREAMING_SNAKE_CASE__ ) + 1 + len(SCREAMING_SNAKE_CASE__ )) , )
lowerCamelCase__ = self.rust_tokenizer_class.from_pretrained(
SCREAMING_SNAKE_CASE__ , use_fast=SCREAMING_SNAKE_CASE__ , add_prefix_space=SCREAMING_SNAKE_CASE__ , trim_offsets=SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = tokenizer_r(SCREAMING_SNAKE_CASE__ , return_offsets_mapping=SCREAMING_SNAKE_CASE__ , add_special_tokens=SCREAMING_SNAKE_CASE__ )
self.assertEqual(encoding.offset_mapping[0] , (0, len(SCREAMING_SNAKE_CASE__ )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(SCREAMING_SNAKE_CASE__ ) + 1, len(SCREAMING_SNAKE_CASE__ ) + 1 + len(SCREAMING_SNAKE_CASE__ )) , )
lowerCamelCase__ = self.rust_tokenizer_class.from_pretrained(
SCREAMING_SNAKE_CASE__ , use_fast=SCREAMING_SNAKE_CASE__ , add_prefix_space=SCREAMING_SNAKE_CASE__ , trim_offsets=SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = tokenizer_r(SCREAMING_SNAKE_CASE__ , return_offsets_mapping=SCREAMING_SNAKE_CASE__ , add_special_tokens=SCREAMING_SNAKE_CASE__ )
self.assertEqual(encoding.offset_mapping[0] , (0, len(SCREAMING_SNAKE_CASE__ )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(SCREAMING_SNAKE_CASE__ ), len(SCREAMING_SNAKE_CASE__ ) + 1 + len(SCREAMING_SNAKE_CASE__ )) , )
lowerCamelCase__ = self.rust_tokenizer_class.from_pretrained(
SCREAMING_SNAKE_CASE__ , use_fast=SCREAMING_SNAKE_CASE__ , add_prefix_space=SCREAMING_SNAKE_CASE__ , trim_offsets=SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = tokenizer_r(SCREAMING_SNAKE_CASE__ , return_offsets_mapping=SCREAMING_SNAKE_CASE__ , add_special_tokens=SCREAMING_SNAKE_CASE__ )
self.assertEqual(encoding.offset_mapping[0] , (0, len(SCREAMING_SNAKE_CASE__ )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(SCREAMING_SNAKE_CASE__ ), len(SCREAMING_SNAKE_CASE__ ) + 1 + len(SCREAMING_SNAKE_CASE__ )) , )
lowerCamelCase__ = F' {text}'
# tokenizer_r = self.rust_tokenizer_class.from_pretrained(
# pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
# )
# encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
# self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
# self.assertEqual(
# encoding.offset_mapping[1],
# (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
# )
lowerCamelCase__ = self.rust_tokenizer_class.from_pretrained(
SCREAMING_SNAKE_CASE__ , use_fast=SCREAMING_SNAKE_CASE__ , add_prefix_space=SCREAMING_SNAKE_CASE__ , trim_offsets=SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = tokenizer_r(SCREAMING_SNAKE_CASE__ , return_offsets_mapping=SCREAMING_SNAKE_CASE__ , add_special_tokens=SCREAMING_SNAKE_CASE__ )
self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(SCREAMING_SNAKE_CASE__ )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(SCREAMING_SNAKE_CASE__ ) + 1, 1 + len(SCREAMING_SNAKE_CASE__ ) + 1 + len(SCREAMING_SNAKE_CASE__ )) , )
lowerCamelCase__ = self.rust_tokenizer_class.from_pretrained(
SCREAMING_SNAKE_CASE__ , use_fast=SCREAMING_SNAKE_CASE__ , add_prefix_space=SCREAMING_SNAKE_CASE__ , trim_offsets=SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = tokenizer_r(SCREAMING_SNAKE_CASE__ , return_offsets_mapping=SCREAMING_SNAKE_CASE__ , add_special_tokens=SCREAMING_SNAKE_CASE__ )
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(SCREAMING_SNAKE_CASE__ )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(SCREAMING_SNAKE_CASE__ ), 1 + len(SCREAMING_SNAKE_CASE__ ) + 1 + len(SCREAMING_SNAKE_CASE__ )) , )
lowerCamelCase__ = self.rust_tokenizer_class.from_pretrained(
SCREAMING_SNAKE_CASE__ , use_fast=SCREAMING_SNAKE_CASE__ , add_prefix_space=SCREAMING_SNAKE_CASE__ , trim_offsets=SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = tokenizer_r(SCREAMING_SNAKE_CASE__ , return_offsets_mapping=SCREAMING_SNAKE_CASE__ , add_special_tokens=SCREAMING_SNAKE_CASE__ )
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(SCREAMING_SNAKE_CASE__ )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(SCREAMING_SNAKE_CASE__ ), 1 + len(SCREAMING_SNAKE_CASE__ ) + 1 + len(SCREAMING_SNAKE_CASE__ )) , )
| 659
|
"""simple docstring"""
def snake_case ( _a: list[list[float]] )-> list[list[float]]:
'''simple docstring'''
lowerCamelCase__ = []
for data in source_data:
for i, el in enumerate(_a ):
if len(_a ) < i + 1:
data_lists.append([] )
data_lists[i].append(float(_a ) )
return data_lists
def snake_case ( _a: list[list[float]] , _a: list[int] )-> list[list[float]]:
'''simple docstring'''
lowerCamelCase__ = []
for dlist, weight in zip(_a , _a ):
lowerCamelCase__ = min(_a )
lowerCamelCase__ = max(_a )
lowerCamelCase__ = []
# for weight 0 score is 1 - actual score
if weight == 0:
for item in dlist:
try:
score.append(1 - ((item - mind) / (maxd - mind)) )
except ZeroDivisionError:
score.append(1 )
elif weight == 1:
for item in dlist:
try:
score.append((item - mind) / (maxd - mind) )
except ZeroDivisionError:
score.append(0 )
# weight not 0 or 1
else:
lowerCamelCase__ = F'Invalid weight of {weight:f} provided'
raise ValueError(_a )
score_lists.append(_a )
return score_lists
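# Worked example for one column: dlist = [20, 60, 100] gives mind = 20 and maxd = 100.
# With weight 1 (higher raw value is better) the scores are [0.0, 0.5, 1.0];
# with weight 0 (lower raw value is better) they flip to [1.0, 0.5, 0.0].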
def snake_case ( _a: list[list[float]] )-> list[float]:
'''simple docstring'''
lowerCamelCase__ = [0 for i in range(len(score_lists[0] ) )]
for slist in score_lists:
for j, ele in enumerate(_a ):
lowerCamelCase__ = final_scores[j] + ele
return final_scores
def snake_case ( _a: list[list[float]] , _a: list[int] )-> list[list[float]]:
'''simple docstring'''
lowerCamelCase__ = get_data(_a )
lowerCamelCase__ = calculate_each_score(_a , _a )
lowerCamelCase__ = generate_final_scores(_a )
# append scores to source data
for i, ele in enumerate(_a ):
source_data[i].append(_a )
return source_data
| 659
| 1
|
"""simple docstring"""
import csv
import tweepy
# Twitter API credentials
UpperCAmelCase = """"""
UpperCAmelCase = """"""
UpperCAmelCase = """"""
UpperCAmelCase = """"""
def __magic_name__ ( _lowerCamelCase: str ) -> None:
'''simple docstring'''
lowerCAmelCase = tweepy.OAuthHandler(_lowerCamelCase, _lowerCamelCase )
auth.set_access_token(_lowerCamelCase, _lowerCamelCase )
lowerCAmelCase = tweepy.API(_lowerCamelCase )
# initialize a list to hold all the tweepy Tweets
lowerCAmelCase = []
# make initial request for most recent tweets (200 is the maximum allowed count)
lowerCAmelCase = api.user_timeline(screen_name=_lowerCamelCase, count=200 )
# save most recent tweets
alltweets.extend(_lowerCamelCase )
# save the id of the oldest tweet less one
lowerCAmelCase = alltweets[-1].id - 1
# keep grabbing tweets until there are no tweets left to grab
while len(_lowerCamelCase ) > 0:
print(F"""getting tweets before {oldest}""" )
# all subsequent requests use the max_id param to prevent duplicates
lowerCAmelCase = api.user_timeline(
screen_name=_lowerCamelCase, count=200, max_id=_lowerCamelCase )
# save most recent tweets
alltweets.extend(_lowerCamelCase )
# update the id of the oldest tweet less one
lowerCAmelCase = alltweets[-1].id - 1
print(F"""...{len(_lowerCamelCase )} tweets downloaded so far""" )
# transform the tweepy tweets into a 2D array that will populate the csv
lowerCAmelCase = [[tweet.id_str, tweet.created_at, tweet.text] for tweet in alltweets]
# write the csv
with open(F"""new_{screen_name}_tweets.csv""", '''w''' ) as f:
lowerCAmelCase = csv.writer(_lowerCamelCase )
writer.writerow(['''id''', '''created_at''', '''text'''] )
writer.writerows(_lowerCamelCase )
if __name__ == "__main__":
# pass in the username of the account you want to download
get_all_tweets("""FirePing32""")
| 535
|
"""simple docstring"""
import inspect
import tempfile
from collections import OrderedDict, UserDict
from collections.abc import MutableMapping
from contextlib import ExitStack, contextmanager
from dataclasses import fields
from enum import Enum
from typing import Any, ContextManager, List, Tuple
import numpy as np
from .import_utils import is_flax_available, is_tf_available, is_torch_available, is_torch_fx_proxy
if is_flax_available():
import jax.numpy as jnp
class lowercase ( lowercase__ ):
def __get__(self : str ,SCREAMING_SNAKE_CASE_ : List[Any] ,SCREAMING_SNAKE_CASE_ : Optional[Any]=None ) -> int:
"""simple docstring"""
if obj is None:
return self
if self.fget is None:
raise AttributeError('''unreadable attribute''' )
lowerCAmelCase = '''__cached_''' + self.fget.__name__
lowerCAmelCase = getattr(SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ )
if cached is None:
lowerCAmelCase = self.fget(SCREAMING_SNAKE_CASE_ )
setattr(SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ )
return cached
def __magic_name__ ( _lowerCamelCase: Union[str, Any] ) -> List[str]:
'''simple docstring'''
lowerCAmelCase = val.lower()
if val in {"y", "yes", "t", "true", "on", "1"}:
return 1
if val in {"n", "no", "f", "false", "off", "0"}:
return 0
raise ValueError(F"""invalid truth value {val!r}""" )
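# Illustrative behaviour: "YES" and "on" map to 1, "Off" and "0" map to 0, and anything
# else (e.g. "maybe") raises ValueError.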
def __magic_name__ ( _lowerCamelCase: int ) -> Union[str, Any]:
'''simple docstring'''
if is_torch_fx_proxy(_lowerCamelCase ):
return True
if is_torch_available():
import torch
if isinstance(_lowerCamelCase, torch.Tensor ):
return True
if is_tf_available():
import tensorflow as tf
if isinstance(_lowerCamelCase, tf.Tensor ):
return True
if is_flax_available():
import jax.numpy as jnp
from jax.core import Tracer
if isinstance(_lowerCamelCase, (jnp.ndarray, Tracer) ):
return True
return isinstance(_lowerCamelCase, np.ndarray )
def __magic_name__ ( _lowerCamelCase: Optional[Any] ) -> Tuple:
'''simple docstring'''
return isinstance(_lowerCamelCase, np.ndarray )
def __magic_name__ ( _lowerCamelCase: str ) -> List[Any]:
'''simple docstring'''
return _is_numpy(_lowerCamelCase )
def __magic_name__ ( _lowerCamelCase: Dict ) -> Optional[int]:
'''simple docstring'''
import torch
return isinstance(_lowerCamelCase, torch.Tensor )
def __magic_name__ ( _lowerCamelCase: List[Any] ) -> Union[str, Any]:
'''simple docstring'''
return False if not is_torch_available() else _is_torch(_lowerCamelCase )
def __magic_name__ ( _lowerCamelCase: Optional[Any] ) -> List[Any]:
'''simple docstring'''
import torch
return isinstance(_lowerCamelCase, torch.device )
def __magic_name__ ( _lowerCamelCase: List[Any] ) -> int:
'''simple docstring'''
return False if not is_torch_available() else _is_torch_device(_lowerCamelCase )
def __magic_name__ ( _lowerCamelCase: str ) -> Dict:
'''simple docstring'''
import torch
if isinstance(_lowerCamelCase, _lowerCamelCase ):
if hasattr(_lowerCamelCase, _lowerCamelCase ):
lowerCAmelCase = getattr(_lowerCamelCase, _lowerCamelCase )
else:
return False
return isinstance(_lowerCamelCase, torch.dtype )
def __magic_name__ ( _lowerCamelCase: Optional[Any] ) -> Tuple:
'''simple docstring'''
return False if not is_torch_available() else _is_torch_dtype(_lowerCamelCase )
def __magic_name__ ( _lowerCamelCase: Tuple ) -> Tuple:
'''simple docstring'''
import tensorflow as tf
return isinstance(_lowerCamelCase, tf.Tensor )
def __magic_name__ ( _lowerCamelCase: int ) -> Tuple:
'''simple docstring'''
return False if not is_tf_available() else _is_tensorflow(_lowerCamelCase )
def __magic_name__ ( _lowerCamelCase: List[Any] ) -> Union[str, Any]:
'''simple docstring'''
import tensorflow as tf
# the `is_symbolic_tensor` predicate is only available starting with TF 2.14
if hasattr(_lowerCamelCase, '''is_symbolic_tensor''' ):
return tf.is_symbolic_tensor(_lowerCamelCase )
return type(_lowerCamelCase ) == tf.Tensor
def __magic_name__ ( _lowerCamelCase: Union[str, Any] ) -> str:
'''simple docstring'''
return False if not is_tf_available() else _is_tf_symbolic_tensor(_lowerCamelCase )
def __magic_name__ ( _lowerCamelCase: List[Any] ) -> List[Any]:
'''simple docstring'''
import jax.numpy as jnp # noqa: F811
return isinstance(_lowerCamelCase, jnp.ndarray )
def __magic_name__ ( _lowerCamelCase: Optional[Any] ) -> Optional[int]:
'''simple docstring'''
return False if not is_flax_available() else _is_jax(_lowerCamelCase )
def __magic_name__ ( _lowerCamelCase: List[str] ) -> Any:
'''simple docstring'''
if isinstance(_lowerCamelCase, (dict, UserDict) ):
return {k: to_py_obj(_lowerCamelCase ) for k, v in obj.items()}
elif isinstance(_lowerCamelCase, (list, tuple) ):
return [to_py_obj(_lowerCamelCase ) for o in obj]
elif is_tf_tensor(_lowerCamelCase ):
return obj.numpy().tolist()
elif is_torch_tensor(_lowerCamelCase ):
return obj.detach().cpu().tolist()
elif is_jax_tensor(_lowerCamelCase ):
return np.asarray(_lowerCamelCase ).tolist()
elif isinstance(_lowerCamelCase, (np.ndarray, np.number) ): # tolist also works on 0d np arrays
return obj.tolist()
else:
return obj
def __magic_name__ ( _lowerCamelCase: Optional[int] ) -> Any:
'''simple docstring'''
if isinstance(_lowerCamelCase, (dict, UserDict) ):
return {k: to_numpy(_lowerCamelCase ) for k, v in obj.items()}
elif isinstance(_lowerCamelCase, (list, tuple) ):
return np.array(_lowerCamelCase )
elif is_tf_tensor(_lowerCamelCase ):
return obj.numpy()
elif is_torch_tensor(_lowerCamelCase ):
return obj.detach().cpu().numpy()
elif is_jax_tensor(_lowerCamelCase ):
return np.asarray(_lowerCamelCase )
else:
return obj
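# Illustrative behaviour of the two converters above (referred to as `to_py_obj` and `to_numpy`
# in their recursive calls); values below are examples only:
#   to_py_obj({"logits": torch.tensor([[0.1, 0.9]])})  ->  {"logits": [[0.1, 0.9]]}
#   to_numpy([1, 2, 3])                                ->  np.array([1, 2, 3])
# Both walk dicts, lists and tuples recursively and normalise framework tensors.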
class lowercase ( lowercase__ ):
def UpperCAmelCase (self : Any ) -> List[Any]:
"""simple docstring"""
lowerCAmelCase = fields(self )
# Safety and consistency checks
if not len(SCREAMING_SNAKE_CASE_ ):
raise ValueError(F"""{self.__class__.__name__} has no fields.""" )
if not all(field.default is None for field in class_fields[1:] ):
raise ValueError(F"""{self.__class__.__name__} should not have more than one required field.""" )
lowerCAmelCase = getattr(self ,class_fields[0].name )
lowerCAmelCase = all(getattr(self ,field.name ) is None for field in class_fields[1:] )
if other_fields_are_none and not is_tensor(SCREAMING_SNAKE_CASE_ ):
if isinstance(SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ):
lowerCAmelCase = first_field.items()
lowerCAmelCase = True
else:
try:
lowerCAmelCase = iter(SCREAMING_SNAKE_CASE_ )
lowerCAmelCase = True
except TypeError:
lowerCAmelCase = False
# if we provided an iterator as first field and the iterator is a (key, value) iterator
# set the associated fields
if first_field_iterator:
for idx, element in enumerate(SCREAMING_SNAKE_CASE_ ):
if (
not isinstance(SCREAMING_SNAKE_CASE_ ,(list, tuple) )
or not len(SCREAMING_SNAKE_CASE_ ) == 2
or not isinstance(element[0] ,SCREAMING_SNAKE_CASE_ )
):
if idx == 0:
# If we do not have an iterator of key/values, set it as attribute
lowerCAmelCase = first_field
else:
# If we have a mixed iterator, raise an error
raise ValueError(
F"""Cannot set key/value for {element}. It needs to be a tuple (key, value).""" )
break
setattr(self ,element[0] ,element[1] )
if element[1] is not None:
lowerCAmelCase = element[1]
elif first_field is not None:
lowerCAmelCase = first_field
else:
for field in class_fields:
lowerCAmelCase = getattr(self ,field.name )
if v is not None:
lowerCAmelCase = v
def __delitem__(self : Optional[Any] ,*SCREAMING_SNAKE_CASE_ : Tuple ,**SCREAMING_SNAKE_CASE_ : Dict ) -> Tuple:
"""simple docstring"""
raise Exception(F"""You cannot use ``__delitem__`` on a {self.__class__.__name__} instance.""" )
def UpperCAmelCase (self : List[Any] ,*SCREAMING_SNAKE_CASE_ : Any ,**SCREAMING_SNAKE_CASE_ : int ) -> List[Any]:
"""simple docstring"""
raise Exception(F"""You cannot use ``setdefault`` on a {self.__class__.__name__} instance.""" )
def UpperCAmelCase (self : Union[str, Any] ,*SCREAMING_SNAKE_CASE_ : List[Any] ,**SCREAMING_SNAKE_CASE_ : Dict ) -> Union[str, Any]:
"""simple docstring"""
raise Exception(F"""You cannot use ``pop`` on a {self.__class__.__name__} instance.""" )
def UpperCAmelCase (self : Any ,*SCREAMING_SNAKE_CASE_ : Optional[Any] ,**SCREAMING_SNAKE_CASE_ : Dict ) -> Any:
"""simple docstring"""
raise Exception(F"""You cannot use ``update`` on a {self.__class__.__name__} instance.""" )
def __getitem__(self : int ,SCREAMING_SNAKE_CASE_ : Tuple ) -> Optional[int]:
"""simple docstring"""
if isinstance(SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ):
lowerCAmelCase = dict(self.items() )
return inner_dict[k]
else:
return self.to_tuple()[k]
def __setattr__(self : int ,SCREAMING_SNAKE_CASE_ : Any ,SCREAMING_SNAKE_CASE_ : int ) -> List[str]:
"""simple docstring"""
if name in self.keys() and value is not None:
# Don't call self.__setitem__ to avoid recursion errors
super().__setitem__(SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ )
super().__setattr__(SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ )
def __setitem__(self : List[str] ,SCREAMING_SNAKE_CASE_ : str ,SCREAMING_SNAKE_CASE_ : Tuple ) -> List[Any]:
"""simple docstring"""
super().__setitem__(SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ )
# Don't call self.__setattr__ to avoid recursion errors
super().__setattr__(SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ )
def UpperCAmelCase (self : List[str] ) -> Tuple[Any]:
"""simple docstring"""
return tuple(self[k] for k in self.keys() )
class lowercase ( lowercase__ ,lowercase__ ):
@classmethod
def UpperCAmelCase (cls : int ,SCREAMING_SNAKE_CASE_ : Any ) -> Dict:
"""simple docstring"""
raise ValueError(
F"""{value} is not a valid {cls.__name__}, please select one of {list(cls._valueamember_map_.keys() )}""" )
class lowercase ( lowercase__ ):
lowercase = '''longest'''
lowercase = '''max_length'''
lowercase = '''do_not_pad'''
class lowercase ( lowercase__ ):
lowercase = '''pt'''
lowercase = '''tf'''
lowercase = '''np'''
lowercase = '''jax'''
class lowercase :
def __init__(self : Optional[Any] ,SCREAMING_SNAKE_CASE_ : List[ContextManager] ) -> Optional[Any]:
"""simple docstring"""
lowerCAmelCase = context_managers
lowerCAmelCase = ExitStack()
def __enter__(self : int ) -> Dict:
"""simple docstring"""
for context_manager in self.context_managers:
self.stack.enter_context(SCREAMING_SNAKE_CASE_ )
def __exit__(self : Tuple ,*SCREAMING_SNAKE_CASE_ : Tuple ,**SCREAMING_SNAKE_CASE_ : int ) -> str:
"""simple docstring"""
self.stack.__exit__(*SCREAMING_SNAKE_CASE_ ,**SCREAMING_SNAKE_CASE_ )
def __magic_name__ ( _lowerCamelCase: str ) -> List[Any]:
'''simple docstring'''
lowerCAmelCase = infer_framework(_lowerCamelCase )
if framework == "tf":
lowerCAmelCase = inspect.signature(model_class.call ) # TensorFlow models
elif framework == "pt":
lowerCAmelCase = inspect.signature(model_class.forward ) # PyTorch models
else:
lowerCAmelCase = inspect.signature(model_class.__call__ ) # Flax models
for p in signature.parameters:
if p == "return_loss" and signature.parameters[p].default is True:
return True
return False
def __magic_name__ ( _lowerCamelCase: Any ) -> List[str]:
'''simple docstring'''
lowerCAmelCase = model_class.__name__
lowerCAmelCase = infer_framework(_lowerCamelCase )
if framework == "tf":
lowerCAmelCase = inspect.signature(model_class.call ) # TensorFlow models
elif framework == "pt":
lowerCAmelCase = inspect.signature(model_class.forward ) # PyTorch models
else:
lowerCAmelCase = inspect.signature(model_class.__call__ ) # Flax models
if "QuestionAnswering" in model_name:
return [p for p in signature.parameters if "label" in p or p in ("start_positions", "end_positions")]
else:
return [p for p in signature.parameters if "label" in p]
def __magic_name__ ( _lowerCamelCase: MutableMapping, _lowerCamelCase: str = "", _lowerCamelCase: str = "." ) -> Optional[int]:
'''simple docstring'''
def _flatten_dict(_lowerCamelCase: Any, _lowerCamelCase: Optional[int]="", _lowerCamelCase: Union[str, Any]="." ):
for k, v in d.items():
lowerCAmelCase = str(_lowerCamelCase ) + delimiter + str(_lowerCamelCase ) if parent_key else k
if v and isinstance(_lowerCamelCase, _lowerCamelCase ):
yield from flatten_dict(_lowerCamelCase, _lowerCamelCase, delimiter=_lowerCamelCase ).items()
else:
yield key, v
return dict(_flatten_dict(_lowerCamelCase, _lowerCamelCase, _lowerCamelCase ) )
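# Illustrative behaviour of the flattening helper above (called `flatten_dict` in its recursive
# call): nested keys are joined with the delimiter, e.g. (example values only)
#   flatten_dict({"a": {"b": 1}, "c": 2})  ->  {"a.b": 1, "c": 2}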
@contextmanager
def __magic_name__ ( _lowerCamelCase: Union[str, Any], _lowerCamelCase: bool = False ) -> Tuple:
'''simple docstring'''
if use_temp_dir:
with tempfile.TemporaryDirectory() as tmp_dir:
yield tmp_dir
else:
yield working_dir
def __magic_name__ ( _lowerCamelCase: Optional[int], _lowerCamelCase: int=None ) -> List[Any]:
'''simple docstring'''
if is_numpy_array(_lowerCamelCase ):
return np.transpose(_lowerCamelCase, axes=_lowerCamelCase )
elif is_torch_tensor(_lowerCamelCase ):
return array.T if axes is None else array.permute(*_lowerCamelCase )
elif is_tf_tensor(_lowerCamelCase ):
import tensorflow as tf
return tf.transpose(_lowerCamelCase, perm=_lowerCamelCase )
elif is_jax_tensor(_lowerCamelCase ):
return jnp.transpose(_lowerCamelCase, axes=_lowerCamelCase )
else:
raise ValueError(F"""Type not supported for transpose: {type(_lowerCamelCase )}.""" )
def __magic_name__ ( _lowerCamelCase: List[Any], _lowerCamelCase: Any ) -> List[str]:
'''simple docstring'''
if is_numpy_array(_lowerCamelCase ):
return np.reshape(_lowerCamelCase, _lowerCamelCase )
elif is_torch_tensor(_lowerCamelCase ):
return array.reshape(*_lowerCamelCase )
elif is_tf_tensor(_lowerCamelCase ):
import tensorflow as tf
return tf.reshape(_lowerCamelCase, _lowerCamelCase )
elif is_jax_tensor(_lowerCamelCase ):
return jnp.reshape(_lowerCamelCase, _lowerCamelCase )
else:
raise ValueError(F"""Type not supported for reshape: {type(_lowerCamelCase )}.""" )
def __magic_name__ ( _lowerCamelCase: int, _lowerCamelCase: Tuple=None ) -> List[str]:
'''simple docstring'''
if is_numpy_array(_lowerCamelCase ):
return np.squeeze(_lowerCamelCase, axis=_lowerCamelCase )
elif is_torch_tensor(_lowerCamelCase ):
return array.squeeze() if axis is None else array.squeeze(dim=_lowerCamelCase )
elif is_tf_tensor(_lowerCamelCase ):
import tensorflow as tf
return tf.squeeze(_lowerCamelCase, axis=_lowerCamelCase )
elif is_jax_tensor(_lowerCamelCase ):
return jnp.squeeze(_lowerCamelCase, axis=_lowerCamelCase )
else:
raise ValueError(F"""Type not supported for squeeze: {type(_lowerCamelCase )}.""" )
def __magic_name__ ( _lowerCamelCase: List[str], _lowerCamelCase: int ) -> List[Any]:
'''simple docstring'''
if is_numpy_array(_lowerCamelCase ):
return np.expand_dims(_lowerCamelCase, _lowerCamelCase )
elif is_torch_tensor(_lowerCamelCase ):
return array.unsqueeze(dim=_lowerCamelCase )
elif is_tf_tensor(_lowerCamelCase ):
import tensorflow as tf
return tf.expand_dims(_lowerCamelCase, axis=_lowerCamelCase )
elif is_jax_tensor(_lowerCamelCase ):
return jnp.expand_dims(_lowerCamelCase, axis=_lowerCamelCase )
else:
raise ValueError(F"""Type not supported for expand_dims: {type(_lowerCamelCase )}.""" )
def __magic_name__ ( _lowerCamelCase: Any ) -> Dict:
'''simple docstring'''
if is_numpy_array(_lowerCamelCase ):
return np.size(_lowerCamelCase )
elif is_torch_tensor(_lowerCamelCase ):
return array.numel()
elif is_tf_tensor(_lowerCamelCase ):
import tensorflow as tf
return tf.size(_lowerCamelCase )
elif is_jax_tensor(_lowerCamelCase ):
return array.size
else:
raise ValueError(F"""Type not supported for expand_dims: {type(_lowerCamelCase )}.""" )
def __magic_name__ ( _lowerCamelCase: Optional[Any], _lowerCamelCase: Optional[int] ) -> Optional[int]:
'''simple docstring'''
for key, value in auto_map.items():
if isinstance(_lowerCamelCase, (tuple, list) ):
lowerCAmelCase = [F"""{repo_id}--{v}""" if (v is not None and '''--''' not in v) else v for v in value]
elif value is not None and "--" not in value:
lowerCAmelCase = F"""{repo_id}--{value}"""
return auto_map
def __magic_name__ ( _lowerCamelCase: str ) -> Any:
'''simple docstring'''
for base_class in inspect.getmro(_lowerCamelCase ):
lowerCAmelCase = base_class.__module__
lowerCAmelCase = base_class.__name__
if module.startswith('''tensorflow''' ) or module.startswith('''keras''' ) or name == "TFPreTrainedModel":
return "tf"
elif module.startswith('''torch''' ) or name == "PreTrainedModel":
return "pt"
elif module.startswith('''flax''' ) or module.startswith('''jax''' ) or name == "FlaxPreTrainedModel":
return "flax"
else:
raise TypeError(F"""Could not infer framework from class {model_class}.""" )
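# Illustrative outcomes (examples only): a torch-based "PreTrainedModel" subclass resolves to
# "pt", a tensorflow/keras "TFPreTrainedModel" subclass to "tf", and a flax/jax
# "FlaxPreTrainedModel" subclass to "flax"; anything else raises the TypeError above.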
| 535
| 1
|
import argparse
import fairseq
import torch
from transformers import UniSpeechSatConfig, UniSpeechSatForCTC, UniSpeechSatForPreTraining, logging
logging.set_verbosity_info()
A : Optional[int] = logging.get_logger(__name__)
A : Any = {
"post_extract_proj": "feature_projection.projection",
"encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
"self_attn.k_proj": "encoder.layers.*.attention.k_proj",
"self_attn.v_proj": "encoder.layers.*.attention.v_proj",
"self_attn.q_proj": "encoder.layers.*.attention.q_proj",
"self_attn.out_proj": "encoder.layers.*.attention.out_proj",
"self_attn_layer_norm": "encoder.layers.*.layer_norm",
"fc1": "encoder.layers.*.feed_forward.intermediate_dense",
"fc2": "encoder.layers.*.feed_forward.output_dense",
"final_layer_norm": "encoder.layers.*.final_layer_norm",
"encoder.layer_norm": "encoder.layer_norm",
"encoder.layer_norm_for_extract": "layer_norm_for_extract",
"w2v_model.layer_norm": "feature_projection.layer_norm",
"quantizer.weight_proj": "quantizer.weight_proj",
"quantizer.vars": "quantizer.codevectors",
"project_q": "project_q",
"final_proj": "project_hid",
"w2v_encoder.proj": "lm_head",
"label_embs_concat": "label_embeddings_concat",
"mask_emb": "masked_spec_embed",
"spk_proj": "speaker_proj",
}
A : Dict = [
"lm_head",
"quantizer.weight_proj",
"quantizer.codevectors",
"project_q",
"project_hid",
"label_embeddings_concat",
"speaker_proj",
"layer_norm_for_extract",
]
def a__ ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ):
for attribute in key.split("." ):
SCREAMING_SNAKE_CASE_ = getattr(__UpperCamelCase , __UpperCamelCase )
if weight_type is not None:
SCREAMING_SNAKE_CASE_ = getattr(__UpperCamelCase , __UpperCamelCase ).shape
else:
SCREAMING_SNAKE_CASE_ = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
F'''Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'''
F''' {value.shape} for {full_name}''' )
if weight_type == "weight":
SCREAMING_SNAKE_CASE_ = value
elif weight_type == "weight_g":
SCREAMING_SNAKE_CASE_ = value
elif weight_type == "weight_v":
SCREAMING_SNAKE_CASE_ = value
elif weight_type == "bias":
SCREAMING_SNAKE_CASE_ = value
else:
SCREAMING_SNAKE_CASE_ = value
logger.info(F'''{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.''' )
def a__ ( __UpperCamelCase , __UpperCamelCase ):
SCREAMING_SNAKE_CASE_ = []
SCREAMING_SNAKE_CASE_ = fairseq_model.state_dict()
SCREAMING_SNAKE_CASE_ = hf_model.unispeech_sat.feature_extractor
for name, value in fairseq_dict.items():
SCREAMING_SNAKE_CASE_ = False
if "conv_layers" in name:
load_conv_layer(
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , hf_model.config.feat_extract_norm == "group" , )
SCREAMING_SNAKE_CASE_ = True
else:
for key, mapped_key in MAPPING.items():
SCREAMING_SNAKE_CASE_ = "unispeech_sat." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split("w2v_model." )[-1] == name.split("." )[0]:
if "layer_norm_for_extract" in name and (".".join(name.split("." )[:-1] ) != key):
# special case since naming is very similar
continue
SCREAMING_SNAKE_CASE_ = True
if "*" in mapped_key:
SCREAMING_SNAKE_CASE_ = name.split(__UpperCamelCase )[0].split("." )[-2]
SCREAMING_SNAKE_CASE_ = mapped_key.replace("*" , __UpperCamelCase )
if "weight_g" in name:
SCREAMING_SNAKE_CASE_ = "weight_g"
elif "weight_v" in name:
SCREAMING_SNAKE_CASE_ = "weight_v"
elif "bias" in name:
SCREAMING_SNAKE_CASE_ = "bias"
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
SCREAMING_SNAKE_CASE_ = "weight"
else:
SCREAMING_SNAKE_CASE_ = None
set_recursively(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
continue
if not is_used:
unused_weights.append(__UpperCamelCase )
logger.warning(F'''Unused weights: {unused_weights}''' )
def a__ ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ):
SCREAMING_SNAKE_CASE_ = full_name.split("conv_layers." )[-1]
SCREAMING_SNAKE_CASE_ = name.split("." )
SCREAMING_SNAKE_CASE_ = int(items[0] )
SCREAMING_SNAKE_CASE_ = int(items[1] )
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.''' )
SCREAMING_SNAKE_CASE_ = value
logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.''' )
SCREAMING_SNAKE_CASE_ = value
logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor[layer_id].layer_norm.bias.data.shape} was found.''' )
SCREAMING_SNAKE_CASE_ = value
logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.''' )
SCREAMING_SNAKE_CASE_ = value
logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
else:
unused_weights.append(__UpperCamelCase )
@torch.no_grad()
def a__ ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase=None , __UpperCamelCase=None , __UpperCamelCase=True ):
if config_path is not None:
SCREAMING_SNAKE_CASE_ = UniSpeechSatConfig.from_pretrained(__UpperCamelCase )
else:
SCREAMING_SNAKE_CASE_ = UniSpeechSatConfig()
SCREAMING_SNAKE_CASE_ = ""
if is_finetuned:
SCREAMING_SNAKE_CASE_ = UniSpeechSatForCTC(__UpperCamelCase )
else:
SCREAMING_SNAKE_CASE_ = UniSpeechSatForPreTraining(__UpperCamelCase )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={"data": "/".join(dict_path.split("/" )[:-1] )} )
SCREAMING_SNAKE_CASE_ = model[0].eval()
recursively_load_weights(__UpperCamelCase , __UpperCamelCase )
hf_wavavec.save_pretrained(__UpperCamelCase )
if __name__ == "__main__":
A : Union[str, Any] = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument(
"--not_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
)
A : Optional[Any] = parser.parse_args()
convert_unispeech_sat_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
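# Example invocation (script name and paths are illustrative only):
#   python convert_unispeech_sat_checkpoint.py \
#       --checkpoint_path /path/to/fairseq_checkpoint.pt \
#       --dict_path /path/to/dict \
#       --pytorch_dump_folder_path ./unispeech-sat-hf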
| 356
|
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
if is_torch_available():
import torch
from transformers import XLMRobertaModel
@require_sentencepiece
@require_tokenizers
@require_torch
class lowerCamelCase (unittest.TestCase ):
"""simple docstring"""
@slow
def __A ( self : Dict ) -> Any:
SCREAMING_SNAKE_CASE_ = XLMRobertaModel.from_pretrained("xlm-roberta-base" )
SCREAMING_SNAKE_CASE_ = torch.tensor([[0, 581, 10_269, 83, 99_942, 136, 60_742, 23, 70, 80_583, 18_276, 2]] )
# The dog is cute and lives in the garden house
SCREAMING_SNAKE_CASE_ = torch.Size((1, 12, 768) ) # batch_size, sequence_length, embedding_vector_dim
SCREAMING_SNAKE_CASE_ = torch.tensor(
[[-0.0101, 0.1218, -0.0803, 0.0801, 0.1327, 0.0776, -0.1215, 0.2383, 0.3338, 0.3106, 0.0300, 0.0252]] )
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base')
# xlmr.eval()
# expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
with torch.no_grad():
SCREAMING_SNAKE_CASE_ = model(__magic_name__ )["last_hidden_state"].detach()
self.assertEqual(output.shape , __magic_name__ )
# compare the actual values for a slice of last dim
self.assertTrue(torch.allclose(output[:, :, -1] , __magic_name__ , atol=1e-3 ) )
@slow
def __A ( self : Tuple ) -> List[str]:
SCREAMING_SNAKE_CASE_ = XLMRobertaModel.from_pretrained("xlm-roberta-large" )
SCREAMING_SNAKE_CASE_ = torch.tensor([[0, 581, 10_269, 83, 99_942, 136, 60_742, 23, 70, 80_583, 18_276, 2]] )
# The dog is cute and lives in the garden house
SCREAMING_SNAKE_CASE_ = torch.Size((1, 12, 1_024) ) # batch_size, sequence_length, embedding_vector_dim
SCREAMING_SNAKE_CASE_ = torch.tensor(
[[-0.0699, -0.0318, 0.0705, -0.1241, 0.0999, -0.0520, 0.1004, -0.1838, -0.4704, 0.1437, 0.0821, 0.0126]] )
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.large')
# xlmr.eval()
# expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
with torch.no_grad():
SCREAMING_SNAKE_CASE_ = model(__magic_name__ )["last_hidden_state"].detach()
self.assertEqual(output.shape , __magic_name__ )
# compare the actual values for a slice of last dim
self.assertTrue(torch.allclose(output[:, :, -1] , __magic_name__ , atol=1e-3 ) )
| 356
| 1
|
def _SCREAMING_SNAKE_CASE ( __snake_case , __snake_case ) -> float:
if mass < 0:
raise ValueError("""The mass of a body cannot be negative""" )
return 0.5 * mass * abs(__snake_case ) * abs(__snake_case )
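# Worked example: for mass = 10 and velocity = 5 the kinetic energy is 0.5 * 10 * 5 * 5 = 125.0.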
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
| 108
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase = logging.get_logger(__name__)
UpperCAmelCase = {
'alibaba-damo/mgp-str-base': 'https://huggingface.co/alibaba-damo/mgp-str-base/resolve/main/config.json',
}
class __snake_case( _lowerCAmelCase ):
'''simple docstring'''
UpperCAmelCase : int = "mgp-str"
def __init__( self , A_=[32, 128] , A_=4 , A_=3 , A_=27 , A_=38 , A_=5_0257 , A_=3_0522 , A_=768 , A_=12 , A_=12 , A_=4.0 , A_=True , A_=False , A_=1e-5 , A_=0.0 , A_=0.0 , A_=0.0 , A_=False , A_=0.0_2 , **A_ , ) -> List[str]:
super().__init__(**A_ )
lowerCAmelCase = image_size
lowerCAmelCase = patch_size
lowerCAmelCase = num_channels
lowerCAmelCase = max_token_length
lowerCAmelCase = num_character_labels
lowerCAmelCase = num_bpe_labels
lowerCAmelCase = num_wordpiece_labels
lowerCAmelCase = hidden_size
lowerCAmelCase = num_hidden_layers
lowerCAmelCase = num_attention_heads
lowerCAmelCase = mlp_ratio
lowerCAmelCase = distilled
lowerCAmelCase = layer_norm_eps
lowerCAmelCase = drop_rate
lowerCAmelCase = qkv_bias
lowerCAmelCase = attn_drop_rate
lowerCAmelCase = drop_path_rate
lowerCAmelCase = output_aa_attentions
lowerCAmelCase = initializer_range
| 433
| 0
|
import argparse
import json
import os
import pickle
import shutil
import numpy as np
import torch
from distiller import Distiller
from lm_seqs_dataset import LmSeqsDataset
from transformers import (
BertConfig,
BertForMaskedLM,
BertTokenizer,
DistilBertConfig,
DistilBertForMaskedLM,
DistilBertTokenizer,
GPTaConfig,
GPTaLMHeadModel,
GPTaTokenizer,
RobertaConfig,
RobertaForMaskedLM,
RobertaTokenizer,
)
from utils import git_log, init_gpu_params, logger, set_seed
lowerCamelCase__ = {
"""distilbert""": (DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer),
"""roberta""": (RobertaConfig, RobertaForMaskedLM, RobertaTokenizer),
"""bert""": (BertConfig, BertForMaskedLM, BertTokenizer),
"""gpt2""": (GPTaConfig, GPTaLMHeadModel, GPTaTokenizer),
}
def lowerCAmelCase__ ( _SCREAMING_SNAKE_CASE : Optional[int] ):
"""simple docstring"""
assert (args.mlm and args.alpha_mlm > 0.0) or (not args.mlm and args.alpha_mlm == 0.0)
assert (args.alpha_mlm > 0.0 and args.alpha_clm == 0.0) or (args.alpha_mlm == 0.0 and args.alpha_clm > 0.0)
if args.mlm:
assert os.path.isfile(args.token_counts )
assert (args.student_type in ["roberta", "distilbert"]) and (args.teacher_type in ["roberta", "bert"])
else:
assert (args.student_type in ["gpt2"]) and (args.teacher_type in ["gpt2"])
assert args.teacher_type == args.student_type or (
args.student_type == "distilbert" and args.teacher_type == "bert"
)
assert os.path.isfile(args.student_config )
if args.student_pretrained_weights is not None:
assert os.path.isfile(args.student_pretrained_weights )
if args.freeze_token_type_embds:
assert args.student_type in ["roberta"]
assert args.alpha_ce >= 0.0
assert args.alpha_mlm >= 0.0
assert args.alpha_clm >= 0.0
assert args.alpha_mse >= 0.0
assert args.alpha_cos >= 0.0
assert args.alpha_ce + args.alpha_mlm + args.alpha_clm + args.alpha_mse + args.alpha_cos > 0.0
def lowerCAmelCase__ ( _SCREAMING_SNAKE_CASE : Any , _SCREAMING_SNAKE_CASE : Optional[int] ):
"""simple docstring"""
if args.student_type == "roberta":
__a = False
elif args.student_type == "gpt2":
__a = False
def lowerCAmelCase__ ( _SCREAMING_SNAKE_CASE : Optional[int] , _SCREAMING_SNAKE_CASE : str ):
"""simple docstring"""
if args.student_type == "roberta":
__a = False
def lowerCAmelCase__ ( ):
"""simple docstring"""
__a = argparse.ArgumentParser(description="""Training""" )
parser.add_argument("""--force""" , action="""store_true""" , help="""Overwrite dump_path if it already exists.""" )
parser.add_argument(
"""--dump_path""" , type=_SCREAMING_SNAKE_CASE , required=_SCREAMING_SNAKE_CASE , help="""The output directory (log, checkpoints, parameters, etc.)""" )
parser.add_argument(
"""--data_file""" , type=_SCREAMING_SNAKE_CASE , required=_SCREAMING_SNAKE_CASE , help="""The binarized file (tokenized + tokens_to_ids) and grouped by sequence.""" , )
parser.add_argument(
"""--student_type""" , type=_SCREAMING_SNAKE_CASE , choices=["""distilbert""", """roberta""", """gpt2"""] , required=_SCREAMING_SNAKE_CASE , help="""The student type (DistilBERT, RoBERTa).""" , )
parser.add_argument("""--student_config""" , type=_SCREAMING_SNAKE_CASE , required=_SCREAMING_SNAKE_CASE , help="""Path to the student configuration.""" )
parser.add_argument(
"""--student_pretrained_weights""" , default=_SCREAMING_SNAKE_CASE , type=_SCREAMING_SNAKE_CASE , help="""Load student initialization checkpoint.""" )
parser.add_argument(
"""--teacher_type""" , choices=["""bert""", """roberta""", """gpt2"""] , required=_SCREAMING_SNAKE_CASE , help="""Teacher type (BERT, RoBERTa).""" )
parser.add_argument("""--teacher_name""" , type=_SCREAMING_SNAKE_CASE , required=_SCREAMING_SNAKE_CASE , help="""The teacher model.""" )
parser.add_argument("""--temperature""" , default=2.0 , type=_SCREAMING_SNAKE_CASE , help="""Temperature for the softmax temperature.""" )
parser.add_argument(
"""--alpha_ce""" , default=0.5 , type=_SCREAMING_SNAKE_CASE , help="""Linear weight for the distillation loss. Must be >=0.""" )
parser.add_argument(
"""--alpha_mlm""" , default=0.0 , type=_SCREAMING_SNAKE_CASE , help="""Linear weight for the MLM loss. Must be >=0. Should be used in conjunction with `mlm` flag.""" , )
parser.add_argument("""--alpha_clm""" , default=0.5 , type=_SCREAMING_SNAKE_CASE , help="""Linear weight for the CLM loss. Must be >=0.""" )
parser.add_argument("""--alpha_mse""" , default=0.0 , type=_SCREAMING_SNAKE_CASE , help="""Linear weight of the MSE loss. Must be >=0.""" )
parser.add_argument(
"""--alpha_cos""" , default=0.0 , type=_SCREAMING_SNAKE_CASE , help="""Linear weight of the cosine embedding loss. Must be >=0.""" )
parser.add_argument(
"""--mlm""" , action="""store_true""" , help="""The LM step: MLM or CLM. If `mlm` is True, the MLM is used over CLM.""" )
parser.add_argument(
"""--mlm_mask_prop""" , default=0.15 , type=_SCREAMING_SNAKE_CASE , help="""Proportion of tokens for which we need to make a prediction.""" , )
parser.add_argument("""--word_mask""" , default=0.8 , type=_SCREAMING_SNAKE_CASE , help="""Proportion of tokens to mask out.""" )
parser.add_argument("""--word_keep""" , default=0.1 , type=_SCREAMING_SNAKE_CASE , help="""Proportion of tokens to keep.""" )
parser.add_argument("""--word_rand""" , default=0.1 , type=_SCREAMING_SNAKE_CASE , help="""Proportion of tokens to randomly replace.""" )
parser.add_argument(
"""--mlm_smoothing""" , default=0.7 , type=_SCREAMING_SNAKE_CASE , help="""Smoothing parameter to emphasize more rare tokens (see XLM, similar to word2vec).""" , )
parser.add_argument("""--token_counts""" , type=_SCREAMING_SNAKE_CASE , help="""The token counts in the data_file for MLM.""" )
parser.add_argument(
"""--restrict_ce_to_mask""" , action="""store_true""" , help="""If true, compute the distillation loss only the [MLM] prediction distribution.""" , )
parser.add_argument(
"""--freeze_pos_embs""" , action="""store_true""" , help="""Freeze positional embeddings during distillation. For student_type in ['roberta', 'gpt2'] only.""" , )
parser.add_argument(
"""--freeze_token_type_embds""" , action="""store_true""" , help="""Freeze token type embeddings during distillation if existent. For student_type in ['roberta'] only.""" , )
parser.add_argument("""--n_epoch""" , type=_SCREAMING_SNAKE_CASE , default=3 , help="""Number of pass on the whole dataset.""" )
parser.add_argument("""--batch_size""" , type=_SCREAMING_SNAKE_CASE , default=5 , help="""Batch size (for each process).""" )
parser.add_argument(
"""--group_by_size""" , action="""store_false""" , help="""If true, group sequences that have similar length into the same batch. Default is true.""" , )
parser.add_argument(
"""--gradient_accumulation_steps""" , type=_SCREAMING_SNAKE_CASE , default=50 , help="""Gradient accumulation for larger training batches.""" , )
parser.add_argument("""--warmup_prop""" , default=0.05 , type=_SCREAMING_SNAKE_CASE , help="""Linear warmup proportion.""" )
parser.add_argument("""--weight_decay""" , default=0.0 , type=_SCREAMING_SNAKE_CASE , help="""Weight decay if we apply some.""" )
parser.add_argument("""--learning_rate""" , default=5e-4 , type=_SCREAMING_SNAKE_CASE , help="""The initial learning rate for Adam.""" )
parser.add_argument("""--adam_epsilon""" , default=1e-6 , type=_SCREAMING_SNAKE_CASE , help="""Epsilon for Adam optimizer.""" )
parser.add_argument("""--max_grad_norm""" , default=5.0 , type=_SCREAMING_SNAKE_CASE , help="""Max gradient norm.""" )
parser.add_argument("""--initializer_range""" , default=0.02 , type=_SCREAMING_SNAKE_CASE , help="""Random initialization range.""" )
parser.add_argument(
"""--fp16""" , action="""store_true""" , help="""Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit""" , )
parser.add_argument(
"""--fp16_opt_level""" , type=_SCREAMING_SNAKE_CASE , default="""O1""" , help=(
"""For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."""
"""See details at https://nvidia.github.io/apex/amp.html"""
) , )
parser.add_argument("""--n_gpu""" , type=_SCREAMING_SNAKE_CASE , default=1 , help="""Number of GPUs in the node.""" )
parser.add_argument("""--local_rank""" , type=_SCREAMING_SNAKE_CASE , default=-1 , help="""Distributed training - Local rank""" )
parser.add_argument("""--seed""" , type=_SCREAMING_SNAKE_CASE , default=56 , help="""Random seed""" )
parser.add_argument("""--log_interval""" , type=_SCREAMING_SNAKE_CASE , default=500 , help="""Tensorboard logging interval.""" )
parser.add_argument("""--checkpoint_interval""" , type=_SCREAMING_SNAKE_CASE , default=4000 , help="""Checkpoint interval.""" )
__a = parser.parse_args()
sanity_checks(_SCREAMING_SNAKE_CASE )
# ARGS #
init_gpu_params(_SCREAMING_SNAKE_CASE )
set_seed(_SCREAMING_SNAKE_CASE )
if args.is_master:
if os.path.exists(args.dump_path ):
if not args.force:
raise ValueError(
f"Serialization dir {args.dump_path} already exists, but you have not precised wheter to overwrite"
""" itUse `--force` if you want to overwrite it""" )
else:
shutil.rmtree(args.dump_path )
if not os.path.exists(args.dump_path ):
os.makedirs(args.dump_path )
logger.info(f"Experiment will be dumped and logged in {args.dump_path}" )
# SAVE PARAMS #
logger.info(f"Param: {args}" )
with open(os.path.join(args.dump_path , """parameters.json""" ) , """w""" ) as f:
json.dump(vars(_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE , indent=4 )
git_log(args.dump_path )
__a , __a , __a = MODEL_CLASSES[args.student_type]
__a , __a , __a = MODEL_CLASSES[args.teacher_type]
# TOKENIZER #
__a = teacher_tokenizer_class.from_pretrained(args.teacher_name )
__a = {}
for tok_name, tok_symbol in tokenizer.special_tokens_map.items():
__a = tokenizer.all_special_tokens.index(_SCREAMING_SNAKE_CASE )
__a = tokenizer.all_special_ids[idx]
logger.info(f"Special tokens {special_tok_ids}" )
__a = special_tok_ids
__a = tokenizer.max_model_input_sizes[args.teacher_name]
# DATA LOADER #
logger.info(f"Loading data from {args.data_file}" )
with open(args.data_file , """rb""" ) as fp:
__a = pickle.load(_SCREAMING_SNAKE_CASE )
if args.mlm:
logger.info(f"Loading token counts from {args.token_counts} (already pre-computed)" )
with open(args.token_counts , """rb""" ) as fp:
__a = pickle.load(_SCREAMING_SNAKE_CASE )
__a = np.maximum(_SCREAMING_SNAKE_CASE , 1 ) ** -args.mlm_smoothing
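# count ** (-mlm_smoothing): rarer tokens receive a relatively higher masking weight, the
# XLM-style smoothing described in the --mlm_smoothing help string above.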
for idx in special_tok_ids.values():
__a = 0.0 # do not predict special tokens
__a = torch.from_numpy(_SCREAMING_SNAKE_CASE )
else:
__a = None
__a = LmSeqsDataset(params=_SCREAMING_SNAKE_CASE , data=_SCREAMING_SNAKE_CASE )
logger.info("""Data loader created.""" )
# STUDENT #
logger.info(f"Loading student config from {args.student_config}" )
__a = student_config_class.from_pretrained(args.student_config )
__a = True
if args.student_pretrained_weights is not None:
logger.info(f"Loading pretrained weights from {args.student_pretrained_weights}" )
__a = student_model_class.from_pretrained(args.student_pretrained_weights , config=_SCREAMING_SNAKE_CASE )
else:
__a = student_model_class(_SCREAMING_SNAKE_CASE )
if args.n_gpu > 0:
student.to(f"cuda:{args.local_rank}" )
logger.info("""Student loaded.""" )
# TEACHER #
__a = teacher_model_class.from_pretrained(args.teacher_name , output_hidden_states=_SCREAMING_SNAKE_CASE )
if args.n_gpu > 0:
teacher.to(f"cuda:{args.local_rank}" )
logger.info(f"Teacher loaded from {args.teacher_name}." )
# FREEZING #
if args.freeze_pos_embs:
freeze_pos_embeddings(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
if args.freeze_token_type_embds:
freeze_token_type_embeddings(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# SANITY CHECKS #
assert student.config.vocab_size == teacher.config.vocab_size
assert student.config.hidden_size == teacher.config.hidden_size
assert student.config.max_position_embeddings == teacher.config.max_position_embeddings
if args.mlm:
assert token_probs.size(0 ) == stu_architecture_config.vocab_size
# DISTILLER #
torch.cuda.empty_cache()
__a = Distiller(
params=_SCREAMING_SNAKE_CASE , dataset=_SCREAMING_SNAKE_CASE , token_probs=_SCREAMING_SNAKE_CASE , student=_SCREAMING_SNAKE_CASE , teacher=_SCREAMING_SNAKE_CASE )
distiller.train()
logger.info("""Let's go get some drinks.""" )
if __name__ == "__main__":
main()
| 547
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = {
"""alibaba-damo/mgp-str-base""": """https://huggingface.co/alibaba-damo/mgp-str-base/resolve/main/config.json""",
}
class SCREAMING_SNAKE_CASE ( lowerCamelCase__ ):
__lowerCamelCase : Dict ='mgp-str'
def __init__( self : Optional[Any] , __lowercase : Dict=[32, 128] , __lowercase : Tuple=4 , __lowercase : Optional[Any]=3 , __lowercase : str=27 , __lowercase : Any=38 , __lowercase : str=50257 , __lowercase : List[str]=30522 , __lowercase : Tuple=768 , __lowercase : str=12 , __lowercase : Optional[Any]=12 , __lowercase : str=4.0 , __lowercase : Union[str, Any]=True , __lowercase : List[Any]=False , __lowercase : Union[str, Any]=1E-5 , __lowercase : Optional[int]=0.0 , __lowercase : Dict=0.0 , __lowercase : Union[str, Any]=0.0 , __lowercase : List[str]=False , __lowercase : List[Any]=0.02 , **__lowercase : List[Any] , ):
'''simple docstring'''
super().__init__(**__lowercase )
__a = image_size
__a = patch_size
__a = num_channels
__a = max_token_length
__a = num_character_labels
__a = num_bpe_labels
__a = num_wordpiece_labels
__a = hidden_size
__a = num_hidden_layers
__a = num_attention_heads
__a = mlp_ratio
__a = distilled
__a = layer_norm_eps
__a = drop_rate
__a = qkv_bias
__a = attn_drop_rate
__a = drop_path_rate
__a = output_aa_attentions
__a = initializer_range
| 547
| 1
|
"""simple docstring"""
from collections.abc import Callable
class __lowercase :
def __init__( self : Tuple ,A : Callable | None = None ):
'''simple docstring'''
# Stores actual heap items.
UpperCAmelCase__ : list = []
# Stores indexes of each item for supporting updates and deletion.
UpperCAmelCase__ : dict = {}
# Stores current size of heap.
UpperCAmelCase__ : Any = 0
# Stores function used to evaluate the score of an item on which basis ordering
# will be done.
UpperCAmelCase__ : int = key or (lambda A : A)
def __lowercase ( self : Union[str, Any] ,A : int ):
'''simple docstring'''
return int((i - 1) / 2 ) if i > 0 else None
def __lowercase ( self : Tuple ,A : int ):
'''simple docstring'''
UpperCAmelCase__ : Any = int(2 * i + 1 )
return left if 0 < left < self.size else None
def __lowercase ( self : Any ,A : int ):
'''simple docstring'''
UpperCAmelCase__ : Optional[int] = int(2 * i + 2 )
return right if 0 < right < self.size else None
def __lowercase ( self : List[Any] ,A : int ,A : int ):
'''simple docstring'''
UpperCAmelCase__ , UpperCAmelCase__ : int = (
self.pos_map[self.arr[j][0]],
self.pos_map[self.arr[i][0]],
)
# Then swap the items in the list.
UpperCAmelCase__ , UpperCAmelCase__ : Tuple = self.arr[j], self.arr[i]
def __lowercase ( self : Optional[int] ,A : int ,A : int ):
'''simple docstring'''
return self.arr[i][1] < self.arr[j][1]
def __lowercase ( self : Optional[int] ,A : int ):
'''simple docstring'''
UpperCAmelCase__ : int = self._left(A )
UpperCAmelCase__ : Dict = self._right(A )
UpperCAmelCase__ : Optional[int] = i
if left is not None and not self._cmp(A ,A ):
UpperCAmelCase__ : List[Any] = left
if right is not None and not self._cmp(A ,A ):
UpperCAmelCase__ : List[Any] = right
return valid_parent
def __lowercase ( self : int ,A : int ):
'''simple docstring'''
UpperCAmelCase__ : int = self._parent(A )
while parent is not None and not self._cmp(A ,A ):
self._swap(A ,A )
UpperCAmelCase__ , UpperCAmelCase__ : int = parent, self._parent(A )
def __lowercase ( self : str ,A : int ):
'''simple docstring'''
UpperCAmelCase__ : Any = self._get_valid_parent(A )
while valid_parent != index:
self._swap(A ,A )
UpperCAmelCase__ , UpperCAmelCase__ : Optional[int] = valid_parent, self._get_valid_parent(A )
def __lowercase ( self : Optional[Any] ,A : int ,A : int ):
'''simple docstring'''
if item not in self.pos_map:
return
UpperCAmelCase__ : Tuple = self.pos_map[item]
UpperCAmelCase__ : Dict = [item, self.key(A )]
# Make sure heap is right in both up and down direction.
# Ideally only one of them will make any change.
self._heapify_up(A )
self._heapify_down(A )
def __lowercase ( self : List[Any] ,A : int ):
'''simple docstring'''
if item not in self.pos_map:
return
UpperCAmelCase__ : Any = self.pos_map[item]
del self.pos_map[item]
UpperCAmelCase__ : Dict = self.arr[self.size - 1]
UpperCAmelCase__ : List[Any] = index
self.size -= 1
# Make sure heap is right in both up and down direction. Ideally only one
# of them will make any change- so no performance loss in calling both.
if self.size > index:
self._heapify_up(A )
self._heapify_down(A )
def __lowercase ( self : str ,A : int ,A : int ):
'''simple docstring'''
UpperCAmelCase__ : Dict = len(self.arr )
if arr_len == self.size:
self.arr.append([item, self.key(A )] )
else:
UpperCAmelCase__ : List[str] = [item, self.key(A )]
UpperCAmelCase__ : Union[str, Any] = self.size
self.size += 1
self._heapify_up(self.size - 1 )
def __lowercase ( self : str ):
'''simple docstring'''
return self.arr[0] if self.size else None
def __lowercase ( self : Dict ):
'''simple docstring'''
UpperCAmelCase__ : Tuple = self.get_top()
if top_item_tuple:
self.delete_item(top_item_tuple[0] )
return top_item_tuple
def lowerCAmelCase ( ):
'''simple docstring'''
if __name__ == "__main__":
import doctest
doctest.testmod()
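# Illustrative flow for the priority queue above (method names as referenced inside the class):
#   get_top() peeks at the highest-priority item, delete_item(item) removes an arbitrary item,
#   and _heapify_up / _heapify_down restore the heap property after every insertion or update.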
| 65
|
"""simple docstring"""
import unittest
from transformers import PegasusConfig, PegasusTokenizer, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
__UpperCAmelCase = 'platform'
import jax
import jax.numpy as jnp
import numpy as np
from transformers import FlaxPegasusForConditionalGeneration, FlaxPegasusModel
@require_flax
class __lowercase :
snake_case_ = PegasusConfig
snake_case_ = {}
snake_case_ = """gelu"""
def __init__( self : List[Any] ,A : int ,A : Optional[Any]=13 ,A : Dict=7 ,A : Dict=True ,A : Any=False ,A : Dict=99 ,A : int=32 ,A : Optional[int]=5 ,A : Union[str, Any]=4 ,A : Union[str, Any]=37 ,A : str=0.1 ,A : int=0.1 ,A : Optional[int]=20 ,A : Tuple=2 ,A : str=1 ,A : Optional[Any]=0 ,):
'''simple docstring'''
UpperCAmelCase__ : Optional[Any] = parent
UpperCAmelCase__ : Union[str, Any] = batch_size
UpperCAmelCase__ : List[Any] = seq_length
UpperCAmelCase__ : int = is_training
UpperCAmelCase__ : Any = use_labels
UpperCAmelCase__ : int = vocab_size
UpperCAmelCase__ : Dict = hidden_size
UpperCAmelCase__ : Optional[Any] = num_hidden_layers
UpperCAmelCase__ : int = num_attention_heads
UpperCAmelCase__ : Any = intermediate_size
UpperCAmelCase__ : Optional[int] = hidden_dropout_prob
UpperCAmelCase__ : str = attention_probs_dropout_prob
UpperCAmelCase__ : str = max_position_embeddings
UpperCAmelCase__ : Union[str, Any] = eos_token_id
UpperCAmelCase__ : Union[str, Any] = pad_token_id
UpperCAmelCase__ : List[str] = bos_token_id
def __lowercase ( self : Dict ):
'''simple docstring'''
UpperCAmelCase__ : List[Any] = ids_tensor([self.batch_size, self.seq_length - 1] ,self.vocab_size ).clip(3 ,self.vocab_size )
UpperCAmelCase__ : List[str] = np.expand_dims(np.array([self.eos_token_id] * self.batch_size ) ,1 )
UpperCAmelCase__ : Any = np.concatenate([input_ids, eos_tensor] ,axis=1 )
UpperCAmelCase__ : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
UpperCAmelCase__ : str = self.config_cls(
vocab_size=self.vocab_size ,d_model=self.hidden_size ,encoder_layers=self.num_hidden_layers ,decoder_layers=self.num_hidden_layers ,encoder_attention_heads=self.num_attention_heads ,decoder_attention_heads=self.num_attention_heads ,encoder_ffn_dim=self.intermediate_size ,decoder_ffn_dim=self.intermediate_size ,dropout=self.hidden_dropout_prob ,attention_dropout=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,eos_token_ids=[2] ,bos_token_id=self.bos_token_id ,pad_token_id=self.pad_token_id ,decoder_start_token_id=self.pad_token_id ,**self.config_updates ,)
UpperCAmelCase__ : Optional[Any] = prepare_pegasus_inputs_dict(A ,A ,A )
return config, inputs_dict
def __lowercase ( self : Any ,A : Optional[int] ,A : str ,A : Optional[int] ):
'''simple docstring'''
UpperCAmelCase__ : Any = 20
UpperCAmelCase__ : Dict = model_class_name(A )
UpperCAmelCase__ : str = model.encode(inputs_dict["""input_ids"""] )
UpperCAmelCase__ , UpperCAmelCase__ : List[str] = (
inputs_dict["""decoder_input_ids"""],
inputs_dict["""decoder_attention_mask"""],
)
UpperCAmelCase__ : Union[str, Any] = model.init_cache(decoder_input_ids.shape[0] ,A ,A )
UpperCAmelCase__ : Union[str, Any] = jnp.ones((decoder_input_ids.shape[0], max_decoder_length) ,dtype="""i4""" )
UpperCAmelCase__ : str = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] ,(decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) ,)
UpperCAmelCase__ : Optional[int] = model.decode(
decoder_input_ids[:, :-1] ,A ,decoder_attention_mask=A ,past_key_values=A ,decoder_position_ids=A ,)
UpperCAmelCase__ : Any = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] ,dtype="""i4""" )
UpperCAmelCase__ : int = model.decode(
decoder_input_ids[:, -1:] ,A ,decoder_attention_mask=A ,past_key_values=outputs_cache.past_key_values ,decoder_position_ids=A ,)
UpperCAmelCase__ : Dict = model.decode(A ,A )
UpperCAmelCase__ : str = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1e-3 ,msg=f"Max diff is {diff}" )
def __lowercase ( self : Optional[int] ,A : str ,A : Dict ,A : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase__ : Any = 20
UpperCAmelCase__ : str = model_class_name(A )
UpperCAmelCase__ : Any = model.encode(inputs_dict["""input_ids"""] )
UpperCAmelCase__ , UpperCAmelCase__ : Optional[int] = (
inputs_dict["""decoder_input_ids"""],
inputs_dict["""decoder_attention_mask"""],
)
UpperCAmelCase__ : Optional[int] = jnp.concatenate(
[
decoder_attention_mask,
jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1]) ),
] ,axis=-1 ,)
UpperCAmelCase__ : Union[str, Any] = model.init_cache(decoder_input_ids.shape[0] ,A ,A )
UpperCAmelCase__ : List[str] = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] ,(decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) ,)
UpperCAmelCase__ : Union[str, Any] = model.decode(
decoder_input_ids[:, :-1] ,A ,decoder_attention_mask=A ,past_key_values=A ,decoder_position_ids=A ,)
UpperCAmelCase__ : int = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] ,dtype="""i4""" )
UpperCAmelCase__ : Dict = model.decode(
decoder_input_ids[:, -1:] ,A ,past_key_values=outputs_cache.past_key_values ,decoder_attention_mask=A ,decoder_position_ids=A ,)
UpperCAmelCase__ : Union[str, Any] = model.decode(A ,A ,decoder_attention_mask=A )
UpperCAmelCase__ : Union[str, Any] = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1e-3 ,msg=f"Max diff is {diff}" )
def lowerCAmelCase ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase=None , __UpperCamelCase=None , ):
'''simple docstring'''
if attention_mask is None:
UpperCAmelCase__ : Union[str, Any] = np.not_equal(__UpperCamelCase , config.pad_token_id ).astype(np.inta )
if decoder_attention_mask is None:
UpperCAmelCase__ : Tuple = np.concatenate(
[
np.ones(decoder_input_ids[:, :1].shape , dtype=np.inta ),
np.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ).astype(np.inta ),
] , axis=-1 , )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
}
@require_flax
class __lowercase ( __lowerCamelCase , unittest.TestCase ):
snake_case_ = (
(
FlaxPegasusForConditionalGeneration,
FlaxPegasusModel,
)
if is_flax_available()
else ()
)
snake_case_ = (FlaxPegasusForConditionalGeneration,) if is_flax_available() else ()
snake_case_ = True
snake_case_ = False
snake_case_ = False
snake_case_ = False
def __lowercase ( self : List[str] ):
'''simple docstring'''
UpperCAmelCase__ : int = FlaxPegasusModelTester(self )
UpperCAmelCase__ : Optional[Any] = ConfigTester(self ,config_class=A )
def __lowercase ( self : Tuple ):
'''simple docstring'''
self.config_tester.run_common_tests()
def __lowercase ( self : List[str] ):
'''simple docstring'''
UpperCAmelCase__ , UpperCAmelCase__ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward(A ,A ,A )
def __lowercase ( self : List[str] ):
'''simple docstring'''
UpperCAmelCase__ , UpperCAmelCase__ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward_with_attn_mask(A ,A ,A )
def __lowercase ( self : Any ):
'''simple docstring'''
UpperCAmelCase__ , UpperCAmelCase__ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
UpperCAmelCase__ : List[Any] = self._prepare_for_class(A ,A )
UpperCAmelCase__ : int = model_class(A )
@jax.jit
def encode_jitted(A : Optional[int] ,A : Union[str, Any]=None ,**A : Optional[Any] ):
return model.encode(input_ids=A ,attention_mask=A )
with self.subTest("""JIT Enabled""" ):
UpperCAmelCase__ : int = encode_jitted(**A ).to_tuple()
with self.subTest("""JIT Disabled""" ):
with jax.disable_jit():
UpperCAmelCase__ : Dict = encode_jitted(**A ).to_tuple()
self.assertEqual(len(A ) ,len(A ) )
for jitted_output, output in zip(A ,A ):
self.assertEqual(jitted_output.shape ,output.shape )
def __lowercase ( self : str ):
'''simple docstring'''
UpperCAmelCase__ , UpperCAmelCase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
UpperCAmelCase__ : Dict = model_class(A )
UpperCAmelCase__ : str = model.encode(inputs_dict["""input_ids"""] ,inputs_dict["""attention_mask"""] )
UpperCAmelCase__ : Dict = {
"""decoder_input_ids""": inputs_dict["""decoder_input_ids"""],
"""decoder_attention_mask""": inputs_dict["""decoder_attention_mask"""],
"""encoder_outputs""": encoder_outputs,
}
@jax.jit
def decode_jitted(A : List[Any] ,A : Any ,A : List[Any] ):
return model.decode(
decoder_input_ids=A ,decoder_attention_mask=A ,encoder_outputs=A ,)
with self.subTest("""JIT Enabled""" ):
UpperCAmelCase__ : Tuple = decode_jitted(**A ).to_tuple()
with self.subTest("""JIT Disabled""" ):
with jax.disable_jit():
UpperCAmelCase__ : str = decode_jitted(**A ).to_tuple()
self.assertEqual(len(A ) ,len(A ) )
for jitted_output, output in zip(A ,A ):
self.assertEqual(jitted_output.shape ,output.shape )
@slow
def __lowercase ( self : List[Any] ):
'''simple docstring'''
for model_class_name in self.all_model_classes:
UpperCAmelCase__ : List[str] = model_class_name.from_pretrained("""google/pegasus-large""" ,from_pt=A )
UpperCAmelCase__ : Any = np.ones((1, 1) )
UpperCAmelCase__ : Optional[Any] = model(A )
self.assertIsNotNone(A )
@slow
def __lowercase ( self : Optional[int] ):
'''simple docstring'''
UpperCAmelCase__ : Dict = FlaxPegasusForConditionalGeneration.from_pretrained("""google/pegasus-xsum""" )
UpperCAmelCase__ : Optional[Any] = PegasusTokenizer.from_pretrained("""google/pegasus-xsum""" )
UpperCAmelCase__ : Union[str, Any] = [
""" PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.""",
""" The London trio are up for best UK act and best album, as well as getting two nominations in the best song category.\"We got told like this morning 'Oh I think you're nominated'\", said Dappy.\"And I was like 'Oh yeah, which one?' And now we've got nominated for four awards. I mean, wow!\"Bandmate Fazer added: \"We thought it's best of us to come down and mingle with everyone and say hello to the cameras. And now we find we've got four nominations.\"The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn't be too disappointed if they didn't win this time around.\"At the end of the day we're grateful to be where we are in our careers.\"If it don't happen then it don't happen - live to fight another day and keep on making albums and hits for the fans.\"Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers' All These Things That I've Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year's Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border.\"We just done Edinburgh the other day,\" said Dappy.\"We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!\" """,
]
UpperCAmelCase__ : str = [
"""California's largest electricity provider has turned off power to hundreds of thousands of customers.""",
"""Pop group N-Dubz have revealed they were surprised to get four nominations for this year's Mobo Awards.""",
]
UpperCAmelCase__ : str = tokenizer(A ,return_tensors="""np""" ,truncation=A ,max_length=512 ,padding=A )
UpperCAmelCase__ : Union[str, Any] = model.generate(**A ,num_beams=2 ).sequences
UpperCAmelCase__ : int = tokenizer.batch_decode(A ,skip_special_tokens=A )
assert tgt_text == decoded
| 65
| 1
|
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin, SchedulerOutput
@dataclass
class SCREAMING_SNAKE_CASE__ ( UpperCamelCase_ ):
"""simple docstring"""
a_ = 42
a_ = 42
class SCREAMING_SNAKE_CASE__ ( UpperCamelCase_ , UpperCamelCase_ ):
"""simple docstring"""
a_ = 1
@register_to_config
def __init__( self : Any , __A : int = 2_0_0_0 , __A : float = 0.1_5 , __A : float = 0.0_1 , __A : float = 1_3_4_8.0 , __A : float = 1e-5 , __A : int = 1 , ):
# standard deviation of the initial noise distribution
snake_case__ : Tuple = sigma_max
# setable values
snake_case__ : Union[str, Any] = None
self.set_sigmas(__A , __A , __A , __A )
def _lowercase ( self : Any , __A : torch.FloatTensor , __A : Optional[int] = None ):
return sample
def _lowercase ( self : str , __A : int , __A : float = None , __A : Union[str, torch.device] = None ):
snake_case__ : List[Any] = sampling_eps if sampling_eps is not None else self.config.sampling_eps
snake_case__ : Union[str, Any] = torch.linspace(1 , __A , __A , device=__A )
def _lowercase ( self : str , __A : int , __A : float = None , __A : float = None , __A : float = None ):
snake_case__ : List[Any] = sigma_min if sigma_min is not None else self.config.sigma_min
snake_case__ : Tuple = sigma_max if sigma_max is not None else self.config.sigma_max
snake_case__ : Union[str, Any] = sampling_eps if sampling_eps is not None else self.config.sampling_eps
if self.timesteps is None:
self.set_timesteps(__A , __A )
snake_case__ : str = sigma_min * (sigma_max / sigma_min) ** (self.timesteps / sampling_eps)
snake_case__ : List[str] = torch.exp(torch.linspace(math.log(__A ) , math.log(__A ) , __A ) )
snake_case__ : Union[str, Any] = torch.tensor([sigma_min * (sigma_max / sigma_min) ** t for t in self.timesteps] )
def _lowercase ( self : Tuple , __A : int , __A : str ):
return torch.where(
timesteps == 0 , torch.zeros_like(t.to(timesteps.device ) ) , self.discrete_sigmas[timesteps - 1].to(timesteps.device ) , )
def _lowercase ( self : Union[str, Any] , __A : torch.FloatTensor , __A : int , __A : torch.FloatTensor , __A : Optional[torch.Generator] = None , __A : bool = True , ):
if self.timesteps is None:
raise ValueError(
"`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler" )
snake_case__ : Dict = timestep * torch.ones(
sample.shape[0] , device=sample.device ) # torch.repeat_interleave(timestep, sample.shape[0])
snake_case__ : str = (timestep * (len(self.timesteps ) - 1)).long()
# mps requires indices to be in the same device, so we use cpu as is the default with cuda
snake_case__ : Optional[int] = timesteps.to(self.discrete_sigmas.device )
snake_case__ : Dict = self.discrete_sigmas[timesteps].to(sample.device )
snake_case__ : List[Any] = self.get_adjacent_sigma(__A , __A ).to(sample.device )
snake_case__ : Any = torch.zeros_like(__A )
snake_case__ : int = (sigma**2 - adjacent_sigma**2) ** 0.5
# equation 6 in the paper: the model_output modeled by the network is grad_x log pt(x)
# also equation 47 shows the analog from SDE models to ancestral sampling methods
snake_case__ : Any = diffusion.flatten()
while len(diffusion.shape ) < len(sample.shape ):
snake_case__ : Optional[int] = diffusion.unsqueeze(-1 )
snake_case__ : List[Any] = drift - diffusion**2 * model_output
# equation 6: sample noise for the diffusion term of
snake_case__ : List[str] = randn_tensor(
sample.shape , layout=sample.layout , generator=__A , device=sample.device , dtype=sample.dtype )
snake_case__ : Tuple = sample - drift # subtract because `dt` is a small negative timestep
# TODO is the variable diffusion the correct scaling term for the noise?
snake_case__ : Tuple = prev_sample_mean + diffusion * noise # add impact of diffusion field g
if not return_dict:
return (prev_sample, prev_sample_mean)
return SdeVeOutput(prev_sample=__A , prev_sample_mean=__A )
def _lowercase ( self : Optional[int] , __A : torch.FloatTensor , __A : torch.FloatTensor , __A : Optional[torch.Generator] = None , __A : bool = True , ):
if self.timesteps is None:
raise ValueError(
"`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler" )
        # For small batch sizes, the paper suggests replacing norm(z) with sqrt(d), where d is the dim. of z
# sample noise for correction
snake_case__ : Optional[int] = randn_tensor(sample.shape , layout=sample.layout , generator=__A ).to(sample.device )
# compute step size from the model_output, the noise, and the snr
snake_case__ : Dict = torch.norm(model_output.reshape(model_output.shape[0] , -1 ) , dim=-1 ).mean()
snake_case__ : Any = torch.norm(noise.reshape(noise.shape[0] , -1 ) , dim=-1 ).mean()
snake_case__ : Dict = (self.config.snr * noise_norm / grad_norm) ** 2 * 2
snake_case__ : Optional[int] = step_size * torch.ones(sample.shape[0] ).to(sample.device )
# self.repeat_scalar(step_size, sample.shape[0])
# compute corrected sample: model_output term and noise term
snake_case__ : Optional[int] = step_size.flatten()
while len(step_size.shape ) < len(sample.shape ):
snake_case__ : List[Any] = step_size.unsqueeze(-1 )
snake_case__ : List[str] = sample + step_size * model_output
snake_case__ : Optional[int] = prev_sample_mean + ((step_size * 2) ** 0.5) * noise
if not return_dict:
return (prev_sample,)
return SchedulerOutput(prev_sample=__A )
def _lowercase ( self : Any , __A : torch.FloatTensor , __A : torch.FloatTensor , __A : torch.FloatTensor , ):
# Make sure sigmas and timesteps have the same device and dtype as original_samples
snake_case__ : Tuple = timesteps.to(original_samples.device )
snake_case__ : Union[str, Any] = self.discrete_sigmas.to(original_samples.device )[timesteps]
snake_case__ : List[Any] = (
noise * sigmas[:, None, None, None]
if noise is not None
else torch.randn_like(__A ) * sigmas[:, None, None, None]
)
snake_case__ : str = noise + original_samples
return noisy_samples
def __len__( self : Union[str, Any] ):
return self.config.num_train_timesteps
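# --- Added illustration (not part of the original file) ---
# The class above is an obfuscated copy of the variance-exploding SDE scheduler from
# diffusers (ScoreSdeVeScheduler). Below is a minimal predictor/corrector sampling sketch
# against the un-obfuscated upstream class; the class name, the step_pred/step_correct
# methods and the zero "score model" stand-in are assumptions about that upstream API,
# not guaranteed by this file.
if __name__ == "__main__":
    import torch
    from diffusers import ScoreSdeVeScheduler

    scheduler = ScoreSdeVeScheduler()
    scheduler.set_timesteps(num_inference_steps=10)
    scheduler.set_sigmas(num_inference_steps=10)
    sample = torch.randn(1, 3, 32, 32) * scheduler.config.sigma_max
    for t in scheduler.timesteps:
        model_output = torch.zeros_like(sample)  # stand-in for a score-model forward pass
        sample = scheduler.step_correct(model_output, sample).prev_sample  # corrector step
        sample = scheduler.step_pred(model_output, t, sample).prev_sample  # predictor step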
| 25
|
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.activations import gelu_new, gelu_python, get_activation
@require_torch
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
"""simple docstring"""
def _lowercase ( self : Tuple ):
snake_case__ : List[str] = torch.tensor([-1_0_0, -1, -0.1, 0, 0.1, 1.0, 1_0_0] )
snake_case__ : Tuple = get_activation("gelu" )
self.assertTrue(torch.allclose(gelu_python(__A ) , torch_builtin(__A ) ) )
self.assertFalse(torch.allclose(gelu_python(__A ) , gelu_new(__A ) ) )
def _lowercase ( self : Dict ):
snake_case__ : str = torch.tensor([-1_0_0, -1, -0.1, 0, 0.1, 1.0, 1_0_0] )
snake_case__ : Union[str, Any] = get_activation("gelu" )
snake_case__ : int = get_activation("gelu_10" )
snake_case__ : Optional[int] = torch_builtin(__A )
snake_case__ : Dict = geluaa(__A )
snake_case__ : Optional[Any] = torch.where(y_gelu_aa < 1_0.0 , 1 , 0 )
self.assertTrue(torch.max(__A ).item() == 1_0.0 )
self.assertTrue(torch.allclose(y_gelu * clipped_mask , y_gelu_aa * clipped_mask ) )
def _lowercase ( self : str ):
get_activation("gelu" )
get_activation("gelu_10" )
get_activation("gelu_fast" )
get_activation("gelu_new" )
get_activation("gelu_python" )
get_activation("gelu_pytorch_tanh" )
get_activation("linear" )
get_activation("mish" )
get_activation("quick_gelu" )
get_activation("relu" )
get_activation("sigmoid" )
get_activation("silu" )
get_activation("swish" )
get_activation("tanh" )
with self.assertRaises(__A ):
get_activation("bogus" )
with self.assertRaises(__A ):
get_activation(__A )
def _lowercase ( self : List[str] ):
snake_case__ : List[str] = get_activation("gelu" )
snake_case__ : Any = 1
snake_case__ : Union[str, Any] = get_activation("gelu" )
self.assertEqual(acta.a , 1 )
with self.assertRaises(__A ):
snake_case__ : int = acta.a
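# --- Added illustration (not part of the original tests) ---
# A standalone check of the behaviour exercised above: "gelu_10" matches plain GELU for
# small inputs and saturates at 10.0 for large ones. It uses only the public
# get_activation API already imported at the top of this file.
if is_torch_available():
    _gelu = get_activation("gelu")
    _gelu10 = get_activation("gelu_10")
    assert torch.allclose(_gelu10(torch.tensor([0.5])), _gelu(torch.tensor([0.5])))
    assert float(_gelu10(torch.tensor([100.0]))) == 10.0  # clipped at the +10 bound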
| 25
| 1
|
from typing import Dict, Iterable, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
snake_case = logging.get_logger(__name__)
class A_ ( UpperCAmelCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Dict = ['''pixel_values''']
def __init__( self : Union[str, Any] ,__A : bool = True ,__A : Dict[str, int] = None ,__A : PILImageResampling = PILImageResampling.BICUBIC ,__A : bool = True ,__A : Dict[str, int] = None ,__A : bool = True ,__A : Union[int, float] = 1 / 255 ,__A : bool = True ,__A : Optional[Union[float, Iterable[float]]] = IMAGENET_DEFAULT_MEAN ,__A : Optional[Union[float, Iterable[float]]] = IMAGENET_DEFAULT_STD ,**__A : Optional[int] ,) -> None:
super().__init__(**__A )
_lowercase = size if size is not None else {'shortest_edge': 224}
_lowercase = get_size_dict(__A ,default_to_square=__A )
_lowercase = crop_size if crop_size is not None else {'height': 224, 'width': 224}
_lowercase = get_size_dict(__A ,param_name='crop_size' )
_lowercase = do_resize
_lowercase = size
_lowercase = resample
_lowercase = do_center_crop
_lowercase = crop_size
_lowercase = do_rescale
_lowercase = rescale_factor
_lowercase = do_normalize
_lowercase = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
_lowercase = image_std if image_std is not None else IMAGENET_DEFAULT_STD
def __UpperCAmelCase ( self : int ,__A : np.ndarray ,__A : Dict[str, int] ,__A : PILImageResampling = PILImageResampling.BICUBIC ,__A : Optional[Union[str, ChannelDimension]] = None ,**__A : List[str] ,) -> np.ndarray:
_lowercase = get_size_dict(__A ,default_to_square=__A )
# size_dict is a dict with either keys "height" and "width" or "shortest_edge"
if "shortest_edge" in size:
_lowercase = int((256 / 224) * size['shortest_edge'] )
_lowercase = get_resize_output_image_size(__A ,size=__A ,default_to_square=__A )
_lowercase = {'height': output_size[0], 'width': output_size[1]}
if "height" not in size_dict or "width" not in size_dict:
raise ValueError(
F"""Size dict must have keys 'height' and 'width' or 'shortest_edge'. Got {size_dict.keys()}""" )
return resize(
__A ,size=(size_dict['height'], size_dict['width']) ,resample=__A ,data_format=__A ,**__A )
def __UpperCAmelCase ( self : int ,__A : np.ndarray ,__A : Dict[str, int] ,__A : Optional[Union[str, ChannelDimension]] = None ,**__A : Optional[int] ,) -> np.ndarray:
_lowercase = get_size_dict(__A )
if "height" not in size or "width" not in size:
raise ValueError(F"""Size dict must have keys 'height' and 'width'. Got {size.keys()}""" )
return center_crop(__A ,size=(size['height'], size['width']) ,data_format=__A ,**__A )
def __UpperCAmelCase ( self : Optional[int] ,__A : np.ndarray ,__A : Union[int, float] ,__A : Optional[Union[str, ChannelDimension]] = None ,**__A : List[str] ,) -> np.ndarray:
return rescale(__A ,scale=__A ,data_format=__A ,**__A )
def __UpperCAmelCase ( self : str ,__A : np.ndarray ,__A : Union[float, List[float]] ,__A : Union[float, List[float]] ,__A : Optional[Union[str, ChannelDimension]] = None ,**__A : int ,) -> np.ndarray:
return normalize(__A ,mean=__A ,std=__A ,data_format=__A ,**__A )
def __UpperCAmelCase ( self : Dict ,__A : ImageInput ,__A : Optional[bool] = None ,__A : Optional[Dict[str, int]] = None ,__A : PILImageResampling = None ,__A : Optional[bool] = None ,__A : Optional[Dict[str, int]] = None ,__A : Optional[bool] = None ,__A : Optional[float] = None ,__A : Optional[bool] = None ,__A : Optional[Union[float, Iterable[float]]] = None ,__A : Optional[Union[float, Iterable[float]]] = None ,__A : Optional[TensorType] = None ,__A : ChannelDimension = ChannelDimension.FIRST ,**__A : Optional[Any] ,) -> BatchFeature:
_lowercase = do_resize if do_resize is not None else self.do_resize
_lowercase = resample if resample is not None else self.resample
_lowercase = do_center_crop if do_center_crop is not None else self.do_center_crop
_lowercase = do_rescale if do_rescale is not None else self.do_rescale
_lowercase = rescale_factor if rescale_factor is not None else self.rescale_factor
_lowercase = do_normalize if do_normalize is not None else self.do_normalize
_lowercase = image_mean if image_mean is not None else self.image_mean
_lowercase = image_std if image_std is not None else self.image_std
_lowercase = size if size is not None else self.size
_lowercase = get_size_dict(__A ,default_to_square=__A )
_lowercase = crop_size if crop_size is not None else self.crop_size
_lowercase = get_size_dict(__A ,param_name='crop_size' )
_lowercase = make_list_of_images(__A )
if not valid_images(__A ):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.' )
if do_resize and size is None:
raise ValueError('Size must be specified if do_resize is True.' )
if do_center_crop and crop_size is None:
raise ValueError('Crop size must be specified if do_center_crop is True.' )
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('Image mean and std must be specified if do_normalize is True.' )
# All transformations expect numpy arrays.
_lowercase = [to_numpy_array(__A ) for image in images]
if do_resize:
_lowercase = [self.resize(__A ,__A ,__A ) for image in images]
if do_center_crop:
_lowercase = [self.center_crop(__A ,__A ) for image in images]
if do_rescale:
_lowercase = [self.rescale(__A ,__A ) for image in images]
if do_normalize:
_lowercase = [self.normalize(__A ,__A ,__A ) for image in images]
_lowercase = [to_channel_dimension_format(__A ,__A ) for image in images]
_lowercase = {'pixel_values': images}
return BatchFeature(data=__A ,tensor_type=__A )
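# --- Added illustration (not part of the original file) ---
# The preprocess pipeline above follows the standard transformers image-processor flow:
# resize (shortest edge scaled by 256/224) -> center-crop to 224x224 -> rescale by 1/255 ->
# normalize with the ImageNet default mean/std. A hedged usage sketch against the
# un-obfuscated upstream processor; the AutoImageProcessor entry point and the checkpoint
# name below are assumptions, not confirmed by this file.
#
#   from PIL import Image
#   from transformers import AutoImageProcessor
#
#   processor = AutoImageProcessor.from_pretrained("facebook/levit-128S")  # assumed checkpoint
#   inputs = processor(images=Image.open("cat.png"), return_tensors="pt")
#   print(inputs["pixel_values"].shape)   # e.g. torch.Size([1, 3, 224, 224])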
| 67
|
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
snake_case = logging.get_logger(__name__)
snake_case = {
"""google/bigbird-roberta-base""": """https://huggingface.co/google/bigbird-roberta-base/resolve/main/config.json""",
"""google/bigbird-roberta-large""": """https://huggingface.co/google/bigbird-roberta-large/resolve/main/config.json""",
"""google/bigbird-base-trivia-itc""": """https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/config.json""",
# See all BigBird models at https://huggingface.co/models?filter=big_bird
}
class A_ ( UpperCAmelCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[int] = '''big_bird'''
def __init__( self : str ,__A : Union[str, Any]=5_0358 ,__A : Any=768 ,__A : List[str]=12 ,__A : Union[str, Any]=12 ,__A : int=3072 ,__A : Tuple="gelu_new" ,__A : Any=0.1 ,__A : Optional[Any]=0.1 ,__A : Tuple=4096 ,__A : int=2 ,__A : Union[str, Any]=0.02 ,__A : Optional[int]=1e-12 ,__A : List[str]=True ,__A : List[Any]=0 ,__A : Optional[Any]=1 ,__A : Optional[int]=2 ,__A : Optional[int]=66 ,__A : Tuple="block_sparse" ,__A : Optional[int]=True ,__A : Optional[int]=False ,__A : Tuple=64 ,__A : str=3 ,__A : Optional[int]=None ,**__A : Dict ,) -> Union[str, Any]:
super().__init__(
pad_token_id=__A ,bos_token_id=__A ,eos_token_id=__A ,sep_token_id=__A ,**__A ,)
_lowercase = vocab_size
_lowercase = max_position_embeddings
_lowercase = hidden_size
_lowercase = num_hidden_layers
_lowercase = num_attention_heads
_lowercase = intermediate_size
_lowercase = hidden_act
_lowercase = hidden_dropout_prob
_lowercase = attention_probs_dropout_prob
_lowercase = initializer_range
_lowercase = type_vocab_size
_lowercase = layer_norm_eps
_lowercase = use_cache
_lowercase = rescale_embeddings
_lowercase = attention_type
_lowercase = use_bias
_lowercase = block_size
_lowercase = num_random_blocks
_lowercase = classifier_dropout
class A_ ( UpperCAmelCase ):
"""simple docstring"""
@property
def __UpperCAmelCase ( self : int ) -> Mapping[str, Mapping[int, str]]:
if self.task == "multiple-choice":
_lowercase = {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
_lowercase = {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
] )
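# --- Added illustration (not part of the original file) ---
# A minimal sketch of instantiating the upstream BigBirdConfig that the class above mirrors;
# block_size and num_random_blocks only take effect when attention_type="block_sparse".
# The import below assumes the un-obfuscated transformers class, not the obfuscated one here.
if __name__ == "__main__":
    from transformers import BigBirdConfig

    sparse_cfg = BigBirdConfig(attention_type="block_sparse", block_size=64, num_random_blocks=3)
    dense_cfg = BigBirdConfig(attention_type="original_full")  # dense (full) attention variant
    print(sparse_cfg.block_size, sparse_cfg.num_random_blocks, dense_cfg.attention_type)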
| 67
| 1
|
"""simple docstring"""
from typing import Any, Callable, Dict, List, Optional, Union
import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
SCREAMING_SNAKE_CASE_ = '''CompVis/stable-diffusion-v1-1'''
SCREAMING_SNAKE_CASE_ = '''CompVis/stable-diffusion-v1-2'''
SCREAMING_SNAKE_CASE_ = '''CompVis/stable-diffusion-v1-3'''
SCREAMING_SNAKE_CASE_ = '''CompVis/stable-diffusion-v1-4'''
class a ( _SCREAMING_SNAKE_CASE ):
"""simple docstring"""
def __init__( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ = True , ) -> Any:
        super().__init__()
_UpperCAmelCase = StableDiffusionPipeline.from_pretrained(snake_case_ )
_UpperCAmelCase = StableDiffusionPipeline.from_pretrained(snake_case_ )
_UpperCAmelCase = StableDiffusionPipeline.from_pretrained(snake_case_ )
_UpperCAmelCase = StableDiffusionPipeline(
vae=snake_case_ , text_encoder=snake_case_ , tokenizer=snake_case_ , unet=snake_case_ , scheduler=snake_case_ , safety_checker=snake_case_ , feature_extractor=snake_case_ , requires_safety_checker=snake_case_ , )
self.register_modules(pipelinea=self.pipea , pipelinea=self.pipea , pipelinea=self.pipea , pipelinea=self.pipea )
@property
def __A ( self ) -> Dict[str, Any]:
return {k: getattr(self , snake_case_ ) for k in self.config.keys() if not k.startswith("_" )}
def __A ( self , snake_case_ = "auto" ) -> Dict:
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
_UpperCAmelCase = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(snake_case_ )
def __A ( self ) -> Tuple:
self.enable_attention_slicing(snake_case_ )
@torch.no_grad()
def __A ( self , snake_case_ , snake_case_ = 512 , snake_case_ = 512 , snake_case_ = 50 , snake_case_ = 7.5 , snake_case_ = None , snake_case_ = 1 , snake_case_ = 0.0 , snake_case_ = None , snake_case_ = None , snake_case_ = "pil" , snake_case_ = True , snake_case_ = None , snake_case_ = 1 , **snake_case_ , ) -> Union[str, Any]:
return self.pipea(
prompt=snake_case_ , height=snake_case_ , width=snake_case_ , num_inference_steps=snake_case_ , guidance_scale=snake_case_ , negative_prompt=snake_case_ , num_images_per_prompt=snake_case_ , eta=snake_case_ , generator=snake_case_ , latents=snake_case_ , output_type=snake_case_ , return_dict=snake_case_ , callback=snake_case_ , callback_steps=snake_case_ , **snake_case_ , )
@torch.no_grad()
def __A ( self , snake_case_ , snake_case_ = 512 , snake_case_ = 512 , snake_case_ = 50 , snake_case_ = 7.5 , snake_case_ = None , snake_case_ = 1 , snake_case_ = 0.0 , snake_case_ = None , snake_case_ = None , snake_case_ = "pil" , snake_case_ = True , snake_case_ = None , snake_case_ = 1 , **snake_case_ , ) -> Union[str, Any]:
return self.pipea(
prompt=snake_case_ , height=snake_case_ , width=snake_case_ , num_inference_steps=snake_case_ , guidance_scale=snake_case_ , negative_prompt=snake_case_ , num_images_per_prompt=snake_case_ , eta=snake_case_ , generator=snake_case_ , latents=snake_case_ , output_type=snake_case_ , return_dict=snake_case_ , callback=snake_case_ , callback_steps=snake_case_ , **snake_case_ , )
@torch.no_grad()
def __A ( self , snake_case_ , snake_case_ = 512 , snake_case_ = 512 , snake_case_ = 50 , snake_case_ = 7.5 , snake_case_ = None , snake_case_ = 1 , snake_case_ = 0.0 , snake_case_ = None , snake_case_ = None , snake_case_ = "pil" , snake_case_ = True , snake_case_ = None , snake_case_ = 1 , **snake_case_ , ) -> List[str]:
return self.pipea(
prompt=snake_case_ , height=snake_case_ , width=snake_case_ , num_inference_steps=snake_case_ , guidance_scale=snake_case_ , negative_prompt=snake_case_ , num_images_per_prompt=snake_case_ , eta=snake_case_ , generator=snake_case_ , latents=snake_case_ , output_type=snake_case_ , return_dict=snake_case_ , callback=snake_case_ , callback_steps=snake_case_ , **snake_case_ , )
@torch.no_grad()
def __A ( self , snake_case_ , snake_case_ = 512 , snake_case_ = 512 , snake_case_ = 50 , snake_case_ = 7.5 , snake_case_ = None , snake_case_ = 1 , snake_case_ = 0.0 , snake_case_ = None , snake_case_ = None , snake_case_ = "pil" , snake_case_ = True , snake_case_ = None , snake_case_ = 1 , **snake_case_ , ) -> Dict:
return self.pipea(
prompt=snake_case_ , height=snake_case_ , width=snake_case_ , num_inference_steps=snake_case_ , guidance_scale=snake_case_ , negative_prompt=snake_case_ , num_images_per_prompt=snake_case_ , eta=snake_case_ , generator=snake_case_ , latents=snake_case_ , output_type=snake_case_ , return_dict=snake_case_ , callback=snake_case_ , callback_steps=snake_case_ , **snake_case_ , )
@torch.no_grad()
def __A ( self , snake_case_ , snake_case_ = 512 , snake_case_ = 512 , snake_case_ = 50 , snake_case_ = 7.5 , snake_case_ = None , snake_case_ = 1 , snake_case_ = 0.0 , snake_case_ = None , snake_case_ = None , snake_case_ = "pil" , snake_case_ = True , snake_case_ = None , snake_case_ = 1 , **snake_case_ , ) -> Union[str, Any]:
_UpperCAmelCase = "cuda" if torch.cuda.is_available() else "cpu"
self.to(snake_case_ )
# Checks if the height and width are divisible by 8 or not
if height % 8 != 0 or width % 8 != 0:
raise ValueError(F"""`height` and `width` must be divisible by 8 but are {height} and {width}.""" )
# Get first result from Stable Diffusion Checkpoint v1.1
_UpperCAmelCase = self.textaimg_sda_a(
prompt=snake_case_ , height=snake_case_ , width=snake_case_ , num_inference_steps=snake_case_ , guidance_scale=snake_case_ , negative_prompt=snake_case_ , num_images_per_prompt=snake_case_ , eta=snake_case_ , generator=snake_case_ , latents=snake_case_ , output_type=snake_case_ , return_dict=snake_case_ , callback=snake_case_ , callback_steps=snake_case_ , **snake_case_ , )
# Get first result from Stable Diffusion Checkpoint v1.2
_UpperCAmelCase = self.textaimg_sda_a(
prompt=snake_case_ , height=snake_case_ , width=snake_case_ , num_inference_steps=snake_case_ , guidance_scale=snake_case_ , negative_prompt=snake_case_ , num_images_per_prompt=snake_case_ , eta=snake_case_ , generator=snake_case_ , latents=snake_case_ , output_type=snake_case_ , return_dict=snake_case_ , callback=snake_case_ , callback_steps=snake_case_ , **snake_case_ , )
# Get first result from Stable Diffusion Checkpoint v1.3
_UpperCAmelCase = self.textaimg_sda_a(
prompt=snake_case_ , height=snake_case_ , width=snake_case_ , num_inference_steps=snake_case_ , guidance_scale=snake_case_ , negative_prompt=snake_case_ , num_images_per_prompt=snake_case_ , eta=snake_case_ , generator=snake_case_ , latents=snake_case_ , output_type=snake_case_ , return_dict=snake_case_ , callback=snake_case_ , callback_steps=snake_case_ , **snake_case_ , )
# Get first result from Stable Diffusion Checkpoint v1.4
_UpperCAmelCase = self.textaimg_sda_a(
prompt=snake_case_ , height=snake_case_ , width=snake_case_ , num_inference_steps=snake_case_ , guidance_scale=snake_case_ , negative_prompt=snake_case_ , num_images_per_prompt=snake_case_ , eta=snake_case_ , generator=snake_case_ , latents=snake_case_ , output_type=snake_case_ , return_dict=snake_case_ , callback=snake_case_ , callback_steps=snake_case_ , **snake_case_ , )
# Get all result images into a single list and pass it via StableDiffusionPipelineOutput for final result
return StableDiffusionPipelineOutput([resa[0], resa[0], resa[0], resa[0]] )
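# --- Added illustration (not part of the original file) ---
# The final method above runs the same text-to-image arguments through all four CompVis
# v1.1-v1.4 checkpoints and returns one image per checkpoint. A hedged usage sketch follows;
# loading this file as a diffusers community pipeline and the custom_pipeline name are
# assumptions, not confirmed by the code above.
#
#   from diffusers import DiffusionPipeline
#
#   pipe = DiffusionPipeline.from_pretrained(
#       "CompVis/stable-diffusion-v1-4", custom_pipeline="stable_diffusion_comparison"
#   )
#   out = pipe(prompt="an astronaut riding a horse", num_inference_steps=25)
#   # out.images holds one sample from each of v1.1, v1.2, v1.3 and v1.4, in that order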
| 579
|
"""simple docstring"""
import os
import pytest
import yaml
from datasets.features.features import Features, Value
from datasets.info import DatasetInfo, DatasetInfosDict
@pytest.mark.parametrize(
"files" , [
["full:README.md", "dataset_infos.json"],
["empty:README.md", "dataset_infos.json"],
["dataset_infos.json"],
["full:README.md"],
] , )
def A__ ( A__ , A__ ) -> Tuple:
'''simple docstring'''
_UpperCAmelCase = tmp_path_factory.mktemp("dset_infos_dir" )
if "full:README.md" in files:
with open(dataset_infos_dir / "README.md" , "w" ) as f:
f.write("---\ndataset_info:\n dataset_size: 42\n---" )
if "empty:README.md" in files:
with open(dataset_infos_dir / "README.md" , "w" ) as f:
f.write("" )
# we want to support dataset_infos.json for backward compatibility
if "dataset_infos.json" in files:
with open(dataset_infos_dir / "dataset_infos.json" , "w" ) as f:
f.write("{\"default\": {\"dataset_size\": 42}}" )
_UpperCAmelCase = DatasetInfosDict.from_directory(A__ )
assert dataset_infos
assert dataset_infos["default"].dataset_size == 42
@pytest.mark.parametrize(
"dataset_info" , [
DatasetInfo(),
DatasetInfo(
description="foo" , features=Features({"a": Value("int32" )} ) , builder_name="builder" , config_name="config" , version="1.0.0" , splits=[{"name": "train"}] , download_size=42 , ),
] , )
def A__ ( A__ , A__ ) -> Optional[Any]:
'''simple docstring'''
_UpperCAmelCase = str(A__ )
dataset_info.write_to_directory(A__ )
_UpperCAmelCase = DatasetInfo.from_directory(A__ )
assert dataset_info == reloaded
assert os.path.exists(os.path.join(A__ , "dataset_info.json" ) )
def A__ ( ) -> Union[str, Any]:
'''simple docstring'''
_UpperCAmelCase = DatasetInfo(
description="foo" , citation="bar" , homepage="https://foo.bar" , license="CC0" , features=Features({"a": Value("int32" )} ) , post_processed={} , supervised_keys=() , task_templates=[] , builder_name="builder" , config_name="config" , version="1.0.0" , splits=[{"name": "train", "num_examples": 42}] , download_checksums={} , download_size=1337 , post_processing_size=442 , dataset_size=1234 , size_in_bytes=1337 + 442 + 1234 , )
_UpperCAmelCase = dataset_info._to_yaml_dict()
assert sorted(A__ ) == sorted(DatasetInfo._INCLUDED_INFO_IN_YAML )
for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
assert key in dataset_info_yaml_dict
assert isinstance(dataset_info_yaml_dict[key] , (list, dict, int, str) )
_UpperCAmelCase = yaml.safe_dump(A__ )
_UpperCAmelCase = yaml.safe_load(A__ )
assert dataset_info_yaml_dict == reloaded
def A__ ( ) -> int:
'''simple docstring'''
_UpperCAmelCase = DatasetInfo()
_UpperCAmelCase = dataset_info._to_yaml_dict()
assert dataset_info_yaml_dict == {}
@pytest.mark.parametrize(
"dataset_infos_dict" , [
DatasetInfosDict(),
DatasetInfosDict({"default": DatasetInfo()} ),
DatasetInfosDict({"my_config_name": DatasetInfo()} ),
DatasetInfosDict(
{
"default": DatasetInfo(
description="foo" , features=Features({"a": Value("int32" )} ) , builder_name="builder" , config_name="config" , version="1.0.0" , splits=[{"name": "train"}] , download_size=42 , )
} ),
DatasetInfosDict(
{
"v1": DatasetInfo(dataset_size=42 ),
"v2": DatasetInfo(dataset_size=1337 ),
} ),
] , )
def A__ ( A__ , A__ ) -> Dict:
'''simple docstring'''
_UpperCAmelCase = str(A__ )
dataset_infos_dict.write_to_directory(A__ )
_UpperCAmelCase = DatasetInfosDict.from_directory(A__ )
    # the config_name in dataset_infos_dict takes precedence over the config_name attribute of each DatasetInfo
for config_name, dataset_info in dataset_infos_dict.items():
_UpperCAmelCase = config_name
# the yaml representation doesn't include fields like description or citation
# so we just test that we can recover what we can from the yaml
_UpperCAmelCase = DatasetInfo._from_yaml_dict(dataset_info._to_yaml_dict() )
assert dataset_infos_dict == reloaded
if dataset_infos_dict:
assert os.path.exists(os.path.join(A__ , "README.md" ) )
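# --- Added illustration (not part of the original tests) ---
# A minimal round-trip showing the YAML front matter that DatasetInfosDict writes into
# README.md, using only the public datasets API already imported above; the temporary
# directory is illustrative.
if __name__ == "__main__":
    import tempfile

    with tempfile.TemporaryDirectory() as tmp_dir:
        DatasetInfosDict({"default": DatasetInfo(dataset_size=42)}).write_to_directory(tmp_dir)
        # README.md now carries a "dataset_info:" YAML block holding dataset_size: 42
        print(open(os.path.join(tmp_dir, "README.md")).read())
        assert DatasetInfosDict.from_directory(tmp_dir)["default"].dataset_size == 42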
| 579
| 1
|
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
from ...utils import logging
from ..auto import CONFIG_MAPPING
_A : Dict = logging.get_logger(__name__)
_A : int = {
"""Salesforce/instruct-blip-flan-t5""": """https://huggingface.co/Salesforce/instruct-blip-flan-t5/resolve/main/config.json""",
}
class __snake_case ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = """instructblip_vision_model"""
def __init__( self , A_=14_08 , A_=61_44 , A_=39 , A_=16 , A_=2_24 , A_=14 , A_="gelu" , A_=1E-6 , A_=0.0 , A_=1E-10 , A_=True , **A_ , ):
'''simple docstring'''
super().__init__(**A_ )
SCREAMING_SNAKE_CASE__ = hidden_size
SCREAMING_SNAKE_CASE__ = intermediate_size
SCREAMING_SNAKE_CASE__ = num_hidden_layers
SCREAMING_SNAKE_CASE__ = num_attention_heads
SCREAMING_SNAKE_CASE__ = patch_size
SCREAMING_SNAKE_CASE__ = image_size
SCREAMING_SNAKE_CASE__ = initializer_range
SCREAMING_SNAKE_CASE__ = attention_dropout
SCREAMING_SNAKE_CASE__ = layer_norm_eps
SCREAMING_SNAKE_CASE__ = hidden_act
SCREAMING_SNAKE_CASE__ = qkv_bias
@classmethod
def lowercase_ ( cls , A_ , **A_ ):
'''simple docstring'''
cls._set_token_in_kwargs(A_ )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = cls.get_config_dict(A_ , **A_ )
# get the vision config dict if we are loading from InstructBlipConfig
if config_dict.get('''model_type''' ) == "instructblip":
SCREAMING_SNAKE_CASE__ = config_dict['''vision_config''']
if "model_type" in config_dict and hasattr(cls , '''model_type''' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type '''
f'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
return cls.from_dict(A_ , **A_ )
class __snake_case ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowerCamelCase__ : List[Any] = """instructblip_qformer"""
def __init__( self , A_=3_05_22 , A_=7_68 , A_=12 , A_=12 , A_=30_72 , A_="gelu" , A_=0.1 , A_=0.1 , A_=5_12 , A_=0.02 , A_=1E-12 , A_=0 , A_="absolute" , A_=2 , A_=14_08 , **A_ , ):
'''simple docstring'''
super().__init__(pad_token_id=A_ , **A_ )
SCREAMING_SNAKE_CASE__ = vocab_size
SCREAMING_SNAKE_CASE__ = hidden_size
SCREAMING_SNAKE_CASE__ = num_hidden_layers
SCREAMING_SNAKE_CASE__ = num_attention_heads
SCREAMING_SNAKE_CASE__ = hidden_act
SCREAMING_SNAKE_CASE__ = intermediate_size
SCREAMING_SNAKE_CASE__ = hidden_dropout_prob
SCREAMING_SNAKE_CASE__ = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE__ = max_position_embeddings
SCREAMING_SNAKE_CASE__ = initializer_range
SCREAMING_SNAKE_CASE__ = layer_norm_eps
SCREAMING_SNAKE_CASE__ = position_embedding_type
SCREAMING_SNAKE_CASE__ = cross_attention_frequency
SCREAMING_SNAKE_CASE__ = encoder_hidden_size
@classmethod
def lowercase_ ( cls , A_ , **A_ ):
'''simple docstring'''
cls._set_token_in_kwargs(A_ )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = cls.get_config_dict(A_ , **A_ )
# get the qformer config dict if we are loading from InstructBlipConfig
if config_dict.get('''model_type''' ) == "instructblip":
SCREAMING_SNAKE_CASE__ = config_dict['''qformer_config''']
if "model_type" in config_dict and hasattr(cls , '''model_type''' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type '''
f'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
return cls.from_dict(A_ , **A_ )
class __snake_case ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowerCamelCase__ : str = """instructblip"""
lowerCamelCase__ : List[Any] = True
def __init__( self , A_=None , A_=None , A_=None , A_=32 , **A_ ):
'''simple docstring'''
super().__init__(**A_ )
if vision_config is None:
SCREAMING_SNAKE_CASE__ = {}
logger.info('''vision_config is None. initializing the InstructBlipVisionConfig with default values.''' )
if qformer_config is None:
SCREAMING_SNAKE_CASE__ = {}
logger.info('''qformer_config is None. Initializing the InstructBlipQFormerConfig with default values.''' )
if text_config is None:
SCREAMING_SNAKE_CASE__ = {}
logger.info('''text_config is None. Initializing the text config with default values (`OPTConfig`).''' )
SCREAMING_SNAKE_CASE__ = InstructBlipVisionConfig(**A_ )
SCREAMING_SNAKE_CASE__ = InstructBlipQFormerConfig(**A_ )
SCREAMING_SNAKE_CASE__ = text_config['''model_type'''] if '''model_type''' in text_config else '''opt'''
SCREAMING_SNAKE_CASE__ = CONFIG_MAPPING[text_model_type](**A_ )
SCREAMING_SNAKE_CASE__ = self.text_config.tie_word_embeddings
SCREAMING_SNAKE_CASE__ = self.text_config.is_encoder_decoder
SCREAMING_SNAKE_CASE__ = num_query_tokens
SCREAMING_SNAKE_CASE__ = self.vision_config.hidden_size
SCREAMING_SNAKE_CASE__ = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
SCREAMING_SNAKE_CASE__ = 1.0
SCREAMING_SNAKE_CASE__ = 0.02
@classmethod
def lowercase_ ( cls , A_ , A_ , A_ , **A_ , ):
'''simple docstring'''
return cls(
vision_config=vision_config.to_dict() , qformer_config=qformer_config.to_dict() , text_config=text_config.to_dict() , **A_ , )
def lowercase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = copy.deepcopy(self.__dict__ )
SCREAMING_SNAKE_CASE__ = self.vision_config.to_dict()
SCREAMING_SNAKE_CASE__ = self.qformer_config.to_dict()
SCREAMING_SNAKE_CASE__ = self.text_config.to_dict()
SCREAMING_SNAKE_CASE__ = self.__class__.model_type
return output
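# --- Added illustration (not part of the original file) ---
# A minimal sketch of the top-level config defined above, assuming the un-obfuscated
# transformers InstructBlipConfig that this file mirrors: when the three sub-configs are
# left unspecified they fall back to their defaults, and the text backbone defaults to OPT.
if __name__ == "__main__":
    from transformers import InstructBlipConfig

    config = InstructBlipConfig(num_query_tokens=32)  # vision/qformer/text sub-configs default
    print(config.vision_config.hidden_size, config.qformer_config.encoder_hidden_size)
    print(config.text_config.model_type)              # "opt" by default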
| 100
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase : Dict = logging.get_logger(__name__)
lowercase : Union[str, Any] = {}
class a__ ( __SCREAMING_SNAKE_CASE ):
_A = "llama"
_A = ["past_key_values"]
def __init__( self : Optional[int] , A_ : Optional[int]=3_20_00 , A_ : Union[str, Any]=40_96 , A_ : Optional[int]=1_10_08 , A_ : int=32 , A_ : int=32 , A_ : Tuple=None , A_ : str="silu" , A_ : Union[str, Any]=20_48 , A_ : List[str]=0.02 , A_ : Union[str, Any]=1e-6 , A_ : Dict=True , A_ : Union[str, Any]=0 , A_ : str=1 , A_ : Tuple=2 , A_ : List[str]=1 , A_ : Dict=False , A_ : List[str]=None , **A_ : Tuple , ) -> List[Any]:
"""simple docstring"""
lowerCamelCase_: List[str] = vocab_size
lowerCamelCase_: List[str] = max_position_embeddings
lowerCamelCase_: Union[str, Any] = hidden_size
lowerCamelCase_: Optional[Any] = intermediate_size
lowerCamelCase_: Any = num_hidden_layers
lowerCamelCase_: Dict = num_attention_heads
# for backward compatibility
if num_key_value_heads is None:
lowerCamelCase_: str = num_attention_heads
lowerCamelCase_: str = num_key_value_heads
lowerCamelCase_: str = hidden_act
lowerCamelCase_: List[str] = initializer_range
lowerCamelCase_: Any = rms_norm_eps
lowerCamelCase_: int = pretraining_tp
lowerCamelCase_: Optional[Any] = use_cache
lowerCamelCase_: Any = rope_scaling
self._rope_scaling_validation()
super().__init__(
pad_token_id=A_ , bos_token_id=A_ , eos_token_id=A_ , tie_word_embeddings=A_ , **A_ , )
def lowerCAmelCase ( self : Dict ) -> int:
"""simple docstring"""
if self.rope_scaling is None:
return
if not isinstance(self.rope_scaling , A_ ) or len(self.rope_scaling ) != 2:
raise ValueError(
"""`rope_scaling` must be a dictionary with with two fields, `name` and `factor`, """
f"""got {self.rope_scaling}""" )
lowerCamelCase_: str = self.rope_scaling.get("""type""" , A_ )
lowerCamelCase_: Optional[int] = self.rope_scaling.get("""factor""" , A_ )
if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
raise ValueError(
f"""`rope_scaling`'s name field must be one of ['linear', 'dynamic'], got {rope_scaling_type}""" )
if rope_scaling_factor is None or not isinstance(A_ , A_ ) or rope_scaling_factor <= 1.0:
raise ValueError(f"""`rope_scaling`'s factor field must be an float > 1, got {rope_scaling_factor}""" )
| 423
| 0
|
"""simple docstring"""
import unittest
from transformers import is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, require_torch, slow
if is_flax_available():
import optax
from flax.training.common_utils import onehot
from transformers import AutoTokenizer, FlaxMTaForConditionalGeneration
from transformers.models.ta.modeling_flax_ta import shift_tokens_right
@require_torch
@require_sentencepiece
@require_tokenizers
@require_flax
class _UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
@slow
def A__ ( self ):
UpperCAmelCase__ = FlaxMTaForConditionalGeneration.from_pretrained("""google/mt5-small""" )
UpperCAmelCase__ = AutoTokenizer.from_pretrained("""google/mt5-small""" )
UpperCAmelCase__ = tokenizer("""Hello there""" , return_tensors="""np""" ).input_ids
UpperCAmelCase__ = tokenizer("""Hi I am""" , return_tensors="""np""" ).input_ids
UpperCAmelCase__ = shift_tokens_right(__lowercase , model.config.pad_token_id , model.config.decoder_start_token_id )
UpperCAmelCase__ = model(__lowercase , decoder_input_ids=__lowercase ).logits
UpperCAmelCase__ = optax.softmax_cross_entropy(__lowercase , onehot(__lowercase , logits.shape[-1] ) ).mean()
UpperCAmelCase__ = -(labels.shape[-1] * loss.item())
UpperCAmelCase__ = -84.9_127
self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 1e-4 )
| 422
|
"""simple docstring"""
class _UpperCamelCase :
'''simple docstring'''
def __init__( self , __lowercase = "" , __lowercase = False ):
# Mapping from the first character of the prefix of the node
UpperCAmelCase__ = {}
# A node will be a leaf if the tree contains its word
UpperCAmelCase__ = is_leaf
UpperCAmelCase__ = prefix
def A__ ( self , __lowercase ):
UpperCAmelCase__ = 0
for q, w in zip(self.prefix , __lowercase ):
if q != w:
break
x += 1
return self.prefix[:x], self.prefix[x:], word[x:]
def A__ ( self , __lowercase ):
for word in words:
self.insert(__lowercase )
def A__ ( self , __lowercase ):
# Case 1: If the word is the prefix of the node
# Solution: We set the current node as leaf
if self.prefix == word:
UpperCAmelCase__ = True
# Case 2: The node has no edges that have a prefix to the word
# Solution: We create an edge from the current node to a new one
# containing the word
elif word[0] not in self.nodes:
UpperCAmelCase__ = RadixNode(prefix=__lowercase , is_leaf=__lowercase )
else:
UpperCAmelCase__ = self.nodes[word[0]]
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ = incoming_node.match(
__lowercase )
# Case 3: The node prefix is equal to the matching
# Solution: We insert remaining word on the next node
if remaining_prefix == "":
self.nodes[matching_string[0]].insert(__lowercase )
            # Case 4: The node prefix only partially matches the word
            # Solution: Split the node: create an intermediate node holding the common
            # prefix, reattach the old node under it and insert the remaining word
else:
UpperCAmelCase__ = remaining_prefix
UpperCAmelCase__ = self.nodes[matching_string[0]]
UpperCAmelCase__ = RadixNode(__lowercase , __lowercase )
UpperCAmelCase__ = aux_node
if remaining_word == "":
UpperCAmelCase__ = True
else:
self.nodes[matching_string[0]].insert(__lowercase )
def A__ ( self , __lowercase ):
UpperCAmelCase__ = self.nodes.get(word[0] , __lowercase )
if not incoming_node:
return False
else:
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ = incoming_node.match(
__lowercase )
# If there is remaining prefix, the word can't be on the tree
if remaining_prefix != "":
return False
# This applies when the word and the prefix are equal
elif remaining_word == "":
return incoming_node.is_leaf
# We have word remaining so we check the next node
else:
return incoming_node.find(__lowercase )
def A__ ( self , __lowercase ):
UpperCAmelCase__ = self.nodes.get(word[0] , __lowercase )
if not incoming_node:
return False
else:
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ = incoming_node.match(
__lowercase )
# If there is remaining prefix, the word can't be on the tree
if remaining_prefix != "":
return False
# We have word remaining so we check the next node
elif remaining_word != "":
return incoming_node.delete(__lowercase )
else:
# If it is not a leaf, we don't have to delete
if not incoming_node.is_leaf:
return False
else:
# We delete the nodes if no edges go from it
if len(incoming_node.nodes ) == 0:
del self.nodes[word[0]]
# We merge the current node with its only child
if len(self.nodes ) == 1 and not self.is_leaf:
UpperCAmelCase__ = list(self.nodes.values() )[0]
UpperCAmelCase__ = merging_node.is_leaf
self.prefix += merging_node.prefix
UpperCAmelCase__ = merging_node.nodes
# If there is more than 1 edge, we just mark it as non-leaf
elif len(incoming_node.nodes ) > 1:
UpperCAmelCase__ = False
# If there is 1 edge, we merge it with its child
else:
UpperCAmelCase__ = list(incoming_node.nodes.values() )[0]
UpperCAmelCase__ = merging_node.is_leaf
incoming_node.prefix += merging_node.prefix
UpperCAmelCase__ = merging_node.nodes
return True
def A__ ( self , __lowercase = 0 ):
if self.prefix != "":
print("""-""" * height , self.prefix , """ (leaf)""" if self.is_leaf else """""" )
for value in self.nodes.values():
value.print_tree(height + 1 )
def snake_case__ ( ) ->bool:
UpperCAmelCase__ = """banana bananas bandana band apple all beast""".split()
UpperCAmelCase__ = RadixNode()
root.insert_many(_SCREAMING_SNAKE_CASE )
assert all(root.find(_SCREAMING_SNAKE_CASE ) for word in words )
assert not root.find("""bandanas""" )
assert not root.find("""apps""" )
root.delete("""all""" )
assert not root.find("""all""" )
root.delete("""banana""" )
assert not root.find("""banana""" )
assert root.find("""bananas""" )
return True
def snake_case__ ( ) ->None:
assert test_trie()
def snake_case__ ( ) ->None:
UpperCAmelCase__ = RadixNode()
UpperCAmelCase__ = """banana bananas bandanas bandana band apple all beast""".split()
root.insert_many(_SCREAMING_SNAKE_CASE )
print("""Words:""" , _SCREAMING_SNAKE_CASE )
print("""Tree:""" )
root.print_tree()
if __name__ == "__main__":
main()
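# --- Added illustration (not part of the original file) ---
# The match() logic above splits a node prefix and an incoming word into
# (common prefix, remaining prefix, remaining word); the insert/find/delete cases all branch
# on which of the last two parts are empty. A standalone restatement of that split, kept
# separate because the class above uses obfuscated method names.
def _prefix_split(prefix: str, word: str) -> tuple[str, str, str]:
    x = 0
    for q, w in zip(prefix, word):
        if q != w:
            break
        x += 1
    return prefix[:x], prefix[x:], word[x:]


assert _prefix_split("banana", "bandana") == ("ban", "ana", "dana")  # diverging word -> split node
assert _prefix_split("band", "bandana") == ("band", "", "ana")       # full prefix match -> recurse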
| 422
| 1
|
from typing import Callable, Dict, Optional, Tuple
import torch
from torch import nn
from torch.distributions import (
AffineTransform,
Distribution,
Independent,
NegativeBinomial,
Normal,
StudentT,
TransformedDistribution,
)
class snake_case_ ( a ):
'''simple docstring'''
def __init__( self, A_, A_=None, A_=None, A_=0 ) -> str:
UpperCAmelCase__ =1.0 if scale is None else scale
UpperCAmelCase__ =0.0 if loc is None else loc
super().__init__(A_, [AffineTransform(loc=self.loc, scale=self.scale, event_dim=A_ )] )
@property
def __UpperCAmelCase ( self ) -> List[Any]:
return self.base_dist.mean * self.scale + self.loc
@property
def __UpperCAmelCase ( self ) -> Any:
return self.base_dist.variance * self.scale**2
@property
def __UpperCAmelCase ( self ) -> List[Any]:
return self.variance.sqrt()
class snake_case_ ( nn.Module ):
'''simple docstring'''
def __init__( self, A_, A_, A_, **A_ ) -> None:
super().__init__(**A_ )
UpperCAmelCase__ =args_dim
UpperCAmelCase__ =nn.ModuleList([nn.Linear(A_, A_ ) for dim in args_dim.values()] )
UpperCAmelCase__ =domain_map
def __UpperCAmelCase ( self, A_ ) -> Tuple[torch.Tensor]:
UpperCAmelCase__ =[proj(A_ ) for proj in self.proj]
return self.domain_map(*A_ )
class snake_case_ ( nn.Module ):
'''simple docstring'''
def __init__( self, A_ ) -> int:
super().__init__()
UpperCAmelCase__ =function
def __UpperCAmelCase ( self, A_, *A_ ) -> Any:
return self.function(A_, *A_ )
class snake_case_ :
'''simple docstring'''
__UpperCamelCase = 42
__UpperCamelCase = 42
__UpperCamelCase = 42
def __init__( self, A_ = 1 ) -> None:
UpperCAmelCase__ =dim
UpperCAmelCase__ ={k: dim * self.args_dim[k] for k in self.args_dim}
def __UpperCAmelCase ( self, A_ ) -> Tuple:
if self.dim == 1:
return self.distribution_class(*A_ )
else:
return Independent(self.distribution_class(*A_ ), 1 )
def __UpperCAmelCase ( self, A_, A_ = None, A_ = None, ) -> Distribution:
UpperCAmelCase__ =self._base_distribution(A_ )
if loc is None and scale is None:
return distr
else:
return AffineTransformed(A_, loc=A_, scale=A_, event_dim=self.event_dim )
@property
def __UpperCAmelCase ( self ) -> Tuple:
return () if self.dim == 1 else (self.dim,)
@property
def __UpperCAmelCase ( self ) -> int:
return len(self.event_shape )
@property
def __UpperCAmelCase ( self ) -> float:
return 0.0
def __UpperCAmelCase ( self, A_ ) -> nn.Module:
return ParameterProjection(
in_features=A_, args_dim=self.args_dim, domain_map=LambdaLayer(self.domain_map ), )
def __UpperCAmelCase ( self, *A_ ) -> Optional[Any]:
raise NotImplementedError()
@staticmethod
def __UpperCAmelCase ( A_ ) -> torch.Tensor:
return (x + torch.sqrt(torch.square(A_ ) + 4.0 )) / 2.0
class snake_case_ ( a ):
'''simple docstring'''
__UpperCamelCase = {"df": 1, "loc": 1, "scale": 1}
__UpperCamelCase = StudentT
@classmethod
def __UpperCAmelCase ( cls, A_, A_, A_ ) -> Optional[Any]:
UpperCAmelCase__ =cls.squareplus(A_ ).clamp_min(torch.finfo(scale.dtype ).eps )
UpperCAmelCase__ =2.0 + cls.squareplus(A_ )
return df.squeeze(-1 ), loc.squeeze(-1 ), scale.squeeze(-1 )
class snake_case_ ( a ):
'''simple docstring'''
__UpperCamelCase = {"loc": 1, "scale": 1}
__UpperCamelCase = Normal
@classmethod
def __UpperCAmelCase ( cls, A_, A_ ) -> int:
UpperCAmelCase__ =cls.squareplus(A_ ).clamp_min(torch.finfo(scale.dtype ).eps )
return loc.squeeze(-1 ), scale.squeeze(-1 )
class snake_case_ ( a ):
'''simple docstring'''
__UpperCamelCase = {"total_count": 1, "logits": 1}
__UpperCamelCase = NegativeBinomial
@classmethod
def __UpperCAmelCase ( cls, A_, A_ ) -> Optional[Any]:
UpperCAmelCase__ =cls.squareplus(A_ )
return total_count.squeeze(-1 ), logits.squeeze(-1 )
def __UpperCAmelCase ( self, A_ ) -> Distribution:
UpperCAmelCase__ , UpperCAmelCase__ =distr_args
if self.dim == 1:
return self.distribution_class(total_count=A_, logits=A_ )
else:
return Independent(self.distribution_class(total_count=A_, logits=A_ ), 1 )
def __UpperCAmelCase ( self, A_, A_ = None, A_ = None ) -> Distribution:
UpperCAmelCase__ , UpperCAmelCase__ =distr_args
if scale is not None:
# See scaling property of Gamma.
logits += scale.log()
return self._base_distribution((total_count, logits) )
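# --- Added illustration (not part of the original file) ---
# A standalone sketch of how the pieces above fit together for the Normal head
# (args_dim = {"loc": 1, "scale": 1}): project features to raw loc/scale, push scale
# through the squareplus positivity map used by the classmethods above, then build the
# torch distribution. Written without the obfuscated class names; sizes are illustrative.
if __name__ == "__main__":
    def _squareplus(x: torch.Tensor) -> torch.Tensor:
        return (x + torch.sqrt(torch.square(x) + 4.0)) / 2.0

    in_features, batch = 16, 8
    proj_loc, proj_scale = nn.Linear(in_features, 1), nn.Linear(in_features, 1)
    features = torch.randn(batch, in_features)
    loc = proj_loc(features).squeeze(-1)
    scale = _squareplus(proj_scale(features)).clamp_min(torch.finfo(torch.float32).eps).squeeze(-1)
    distr = Normal(loc, scale)
    print(distr.mean.shape, distr.stddev.shape)  # torch.Size([8]) torch.Size([8])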
| 625
|
import collections
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import (
is_pt_flax_cross_test,
require_flax,
require_torch,
require_vision,
slow,
torch_device,
)
from transformers.utils import is_flax_available, is_torch_available, is_vision_available
from ...test_modeling_flax_common import floats_tensor, ids_tensor, random_attention_mask
from ..bert.test_modeling_flax_bert import FlaxBertModelTester
from ..clip.test_modeling_flax_clip import FlaxCLIPVisionModelTester
from ..vit.test_modeling_flax_vit import FlaxViTModelTester
if is_flax_available():
from transformers import (
FlaxBertModel,
FlaxCLIPVisionModel,
FlaxVisionTextDualEncoderModel,
FlaxViTModel,
VisionTextDualEncoderConfig,
VisionTextDualEncoderProcessor,
)
from transformers.modeling_flax_pytorch_utils import (
convert_pytorch_state_dict_to_flax,
load_flax_weights_in_pytorch_model,
)
if is_torch_available():
import torch
from transformers import VisionTextDualEncoderModel
if is_vision_available():
from PIL import Image
def _UpperCAmelCase ( A ):
'''simple docstring'''
if isinstance(A , collections.abc.Iterable ):
return x
return (x, x)
@require_flax
class snake_case_ :
'''simple docstring'''
def __UpperCAmelCase ( self, A_, A_ ) -> Dict:
pass
def __UpperCAmelCase ( self ) -> List[str]:
pass
def __UpperCAmelCase ( self ) -> str:
pass
def __UpperCAmelCase ( self, A_, A_, A_ ) -> Optional[int]:
UpperCAmelCase__ =np.abs((a - b) ).max()
self.assertLessEqual(A_, A_, f"""Difference between torch and flax is {diff} (>= {tol}).""" )
def __UpperCAmelCase ( self, A_, A_, A_, A_, A_=None, **A_ ) -> List[str]:
UpperCAmelCase__ =VisionTextDualEncoderConfig.from_vision_text_configs(A_, A_ )
UpperCAmelCase__ =FlaxVisionTextDualEncoderModel(A_ )
UpperCAmelCase__ =model(input_ids=A_, pixel_values=A_, attention_mask=A_ )
self.assertEqual(output["text_embeds"].shape, (input_ids.shape[0], config.projection_dim) )
self.assertEqual(output["image_embeds"].shape, (pixel_values.shape[0], config.projection_dim) )
def __UpperCAmelCase ( self, A_, A_, A_, A_, A_=None, **A_ ) -> List[Any]:
UpperCAmelCase__ , UpperCAmelCase__ =self.get_vision_text_model(A_, A_ )
UpperCAmelCase__ ={"vision_model": vision_model, "text_model": text_model}
UpperCAmelCase__ =FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**A_ )
UpperCAmelCase__ =model(input_ids=A_, pixel_values=A_, attention_mask=A_ )
self.assertEqual(output["text_embeds"].shape, (input_ids.shape[0], model.config.projection_dim) )
self.assertEqual(output["image_embeds"].shape, (pixel_values.shape[0], model.config.projection_dim) )
def __UpperCAmelCase ( self, A_, A_, A_, A_, A_=None, **A_ ) -> Any:
UpperCAmelCase__ , UpperCAmelCase__ =self.get_vision_text_model(A_, A_ )
UpperCAmelCase__ ={"vision_model": vision_model, "text_model": text_model}
UpperCAmelCase__ =FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**A_ )
UpperCAmelCase__ =model(input_ids=A_, pixel_values=A_, attention_mask=A_ )
UpperCAmelCase__ =output[0]
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(A_ )
UpperCAmelCase__ =FlaxVisionTextDualEncoderModel.from_pretrained(A_ )
UpperCAmelCase__ =model(input_ids=A_, pixel_values=A_, attention_mask=A_ )
UpperCAmelCase__ =after_output[0]
UpperCAmelCase__ =np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(A_, 1E-3 )
def __UpperCAmelCase ( self, A_, A_, A_, A_, A_=None, **A_ ) -> List[Any]:
UpperCAmelCase__ , UpperCAmelCase__ =self.get_vision_text_model(A_, A_ )
UpperCAmelCase__ ={"vision_model": vision_model, "text_model": text_model}
UpperCAmelCase__ =FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**A_ )
UpperCAmelCase__ =model(
input_ids=A_, pixel_values=A_, attention_mask=A_, output_attentions=A_ )
UpperCAmelCase__ =output.vision_model_output.attentions
self.assertEqual(len(A_ ), vision_config.num_hidden_layers )
# in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token)
UpperCAmelCase__ =to_atuple(vision_model.config.image_size )
UpperCAmelCase__ =to_atuple(vision_model.config.patch_size )
UpperCAmelCase__ =(image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
UpperCAmelCase__ =num_patches + 1
self.assertEqual(vision_attentions[0].shape[-3:], (vision_config.num_attention_heads, seq_len, seq_len) )
UpperCAmelCase__ =output.text_model_output.attentions
self.assertEqual(len(A_ ), text_config.num_hidden_layers )
self.assertEqual(
text_attentions[0].shape[-3:], (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]), )
def __UpperCAmelCase ( self, A_, A_, A_ ) -> Union[str, Any]:
pt_model.to(A_ )
pt_model.eval()
# prepare inputs
UpperCAmelCase__ =inputs_dict
UpperCAmelCase__ ={k: torch.tensor(v.tolist() ) for k, v in flax_inputs.items()}
with torch.no_grad():
UpperCAmelCase__ =pt_model(**A_ ).to_tuple()
UpperCAmelCase__ =fx_model(**A_ ).to_tuple()
self.assertEqual(len(A_ ), len(A_ ), "Output lengths differ between Flax and PyTorch" )
for fx_output, pt_output in zip(fx_outputs[:4], pt_outputs[:4] ):
self.assert_almost_equals(A_, pt_output.numpy(), 4E-2 )
# PT -> Flax
with tempfile.TemporaryDirectory() as tmpdirname:
pt_model.save_pretrained(A_ )
UpperCAmelCase__ =FlaxVisionTextDualEncoderModel.from_pretrained(A_, from_pt=A_ )
UpperCAmelCase__ =fx_model_loaded(**A_ ).to_tuple()
self.assertEqual(len(A_ ), len(A_ ), "Output lengths differ between Flax and PyTorch" )
for fx_output_loaded, pt_output in zip(fx_outputs_loaded[:4], pt_outputs[:4] ):
self.assert_almost_equals(A_, pt_output.numpy(), 4E-2 )
# Flax -> PT
with tempfile.TemporaryDirectory() as tmpdirname:
fx_model.save_pretrained(A_ )
UpperCAmelCase__ =VisionTextDualEncoderModel.from_pretrained(A_, from_flax=A_ )
pt_model_loaded.to(A_ )
pt_model_loaded.eval()
with torch.no_grad():
UpperCAmelCase__ =pt_model_loaded(**A_ ).to_tuple()
self.assertEqual(len(A_ ), len(A_ ), "Output lengths differ between Flax and PyTorch" )
for fx_output, pt_output_loaded in zip(fx_outputs[:4], pt_outputs_loaded[:4] ):
self.assert_almost_equals(A_, pt_output_loaded.numpy(), 4E-2 )
def __UpperCAmelCase ( self, A_, A_, A_ ) -> Tuple:
UpperCAmelCase__ =VisionTextDualEncoderConfig.from_vision_text_configs(A_, A_ )
UpperCAmelCase__ =VisionTextDualEncoderModel(A_ )
UpperCAmelCase__ =FlaxVisionTextDualEncoderModel(A_ )
UpperCAmelCase__ =convert_pytorch_state_dict_to_flax(pt_model.state_dict(), A_ )
UpperCAmelCase__ =fx_state
self.check_pt_flax_equivalence(A_, A_, A_ )
def __UpperCAmelCase ( self, A_, A_, A_ ) -> Dict:
UpperCAmelCase__ =VisionTextDualEncoderConfig.from_vision_text_configs(A_, A_ )
UpperCAmelCase__ =VisionTextDualEncoderModel(A_ )
UpperCAmelCase__ =FlaxVisionTextDualEncoderModel(A_ )
UpperCAmelCase__ =load_flax_weights_in_pytorch_model(A_, fx_model.params )
self.check_pt_flax_equivalence(A_, A_, A_ )
def __UpperCAmelCase ( self ) -> Tuple:
UpperCAmelCase__ =self.prepare_config_and_inputs()
self.check_model_from_pretrained_configs(**A_ )
def __UpperCAmelCase ( self ) -> List[str]:
UpperCAmelCase__ =self.prepare_config_and_inputs()
self.check_vision_text_dual_encoder_from_pretrained(**A_ )
def __UpperCAmelCase ( self ) -> Dict:
UpperCAmelCase__ =self.prepare_config_and_inputs()
self.check_save_load(**A_ )
def __UpperCAmelCase ( self ) -> Optional[Any]:
UpperCAmelCase__ =self.prepare_config_and_inputs()
self.check_vision_text_output_attention(**A_ )
@is_pt_flax_cross_test
def __UpperCAmelCase ( self ) -> Dict:
UpperCAmelCase__ =self.prepare_config_and_inputs()
UpperCAmelCase__ =config_inputs_dict.pop("vision_config" )
UpperCAmelCase__ =config_inputs_dict.pop("text_config" )
UpperCAmelCase__ =config_inputs_dict
self.check_equivalence_pt_to_flax(A_, A_, A_ )
self.check_equivalence_flax_to_pt(A_, A_, A_ )
@slow
def __UpperCAmelCase ( self ) -> Optional[Any]:
UpperCAmelCase__ , UpperCAmelCase__ =self.get_pretrained_model_and_inputs()
UpperCAmelCase__ =model_a(**A_ )
UpperCAmelCase__ =outputs[0]
with tempfile.TemporaryDirectory() as tmp_dirname:
model_a.save_pretrained(A_ )
UpperCAmelCase__ =FlaxVisionTextDualEncoderModel.from_pretrained(A_ )
UpperCAmelCase__ =model_a(**A_ )
UpperCAmelCase__ =after_outputs[0]
UpperCAmelCase__ =np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(A_, 1E-5 )
@require_flax
class snake_case_ ( a, unittest.TestCase ):
'''simple docstring'''
def __UpperCAmelCase ( self ) -> Optional[Any]:
UpperCAmelCase__ =FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(
"hf-internal-testing/tiny-random-vit", "hf-internal-testing/tiny-bert", vision_from_pt=A_, text_from_pt=A_, )
UpperCAmelCase__ =13
UpperCAmelCase__ =floats_tensor(
[
batch_size,
model.config.vision_config.num_channels,
model.config.vision_config.image_size,
model.config.vision_config.image_size,
] )
UpperCAmelCase__ =ids_tensor([batch_size, 4], model.config.text_config.vocab_size )
UpperCAmelCase__ =random_attention_mask([batch_size, 4] )
UpperCAmelCase__ ={"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}
return model, inputs
def __UpperCAmelCase ( self, A_, A_ ) -> Optional[int]:
UpperCAmelCase__ =FlaxViTModel(A_ )
UpperCAmelCase__ =FlaxBertModel(A_ )
return vision_model, text_model
def __UpperCAmelCase ( self ) -> int:
UpperCAmelCase__ =FlaxViTModelTester(self )
UpperCAmelCase__ =FlaxBertModelTester(self )
UpperCAmelCase__ =vit_model_tester.prepare_config_and_inputs()
UpperCAmelCase__ =bert_model_tester.prepare_config_and_inputs()
UpperCAmelCase__ , UpperCAmelCase__ =vision_config_and_inputs
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ =text_config_and_inputs
# make sure that cross attention layers are added
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": attention_mask,
"input_ids": input_ids,
"token_type_ids": token_type_ids,
}
@require_torch
class snake_case_ ( a, unittest.TestCase ):
'''simple docstring'''
def __UpperCAmelCase ( self ) -> str:
UpperCAmelCase__ =FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(
"hf-internal-testing/tiny-random-clip", "hf-internal-testing/tiny-bert", vision_from_pt=A_, text_from_pt=A_, )
UpperCAmelCase__ =13
UpperCAmelCase__ =floats_tensor(
[
batch_size,
model.config.vision_config.num_channels,
model.config.vision_config.image_size,
model.config.vision_config.image_size,
] )
UpperCAmelCase__ =ids_tensor([batch_size, 4], model.config.text_config.vocab_size )
UpperCAmelCase__ =random_attention_mask([batch_size, 4] )
UpperCAmelCase__ ={"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}
return model, inputs
def __UpperCAmelCase ( self, A_, A_ ) -> Tuple:
UpperCAmelCase__ =FlaxCLIPVisionModel(A_ )
UpperCAmelCase__ =FlaxBertModel(A_ )
return vision_model, text_model
def __UpperCAmelCase ( self ) -> Optional[Any]:
UpperCAmelCase__ =FlaxCLIPVisionModelTester(self )
UpperCAmelCase__ =FlaxBertModelTester(self )
UpperCAmelCase__ =clip_model_tester.prepare_config_and_inputs()
UpperCAmelCase__ =bert_model_tester.prepare_config_and_inputs()
UpperCAmelCase__ , UpperCAmelCase__ =vision_config_and_inputs
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ =text_config_and_inputs
# make sure that cross attention layers are added
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": attention_mask,
"input_ids": input_ids,
"token_type_ids": token_type_ids,
}
@require_flax
@require_vision
class snake_case_ ( unittest.TestCase ):
'''simple docstring'''
@slow
def __UpperCAmelCase ( self ) -> Union[str, Any]:
model = FlaxVisionTextDualEncoderModel.from_pretrained("clip-italian/clip-italian", logit_scale_init_value=1.0 )
processor = VisionTextDualEncoderProcessor.from_pretrained("clip-italian/clip-italian" )
image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
inputs = processor(
text=["una foto di un gatto", "una foto di un cane"], images=image, padding=True, return_tensors="np" )
outputs = model(**inputs )
# verify the logits
self.assertEqual(outputs.logits_per_image.shape, (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]) )
self.assertEqual(
outputs.logits_per_text.shape, (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]), )
expected_logits = np.array([[1.2_28_47_27, 0.3_10_41_22]] )
self.assertTrue(np.allclose(outputs.logits_per_image, expected_logits, atol=1E-3 ) )
| 625
| 1
|
"""simple docstring"""
import logging
import os
import quant_trainer
import torch
from torch.utils.data import DataLoader
from transformers import Trainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput
logger = logging.getLogger(__name__)
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class UpperCamelCase_ ( UpperCamelCase):
"""simple docstring"""
def __init__( self : Tuple , *UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : str=None , UpperCAmelCase__ : Dict=None , UpperCAmelCase__ : Dict=None , **UpperCAmelCase__ : Tuple ) -> int:
super().__init__(*UpperCAmelCase__ , **UpperCAmelCase__ )
__SCREAMING_SNAKE_CASE = eval_examples
__SCREAMING_SNAKE_CASE = post_process_function
__SCREAMING_SNAKE_CASE = quant_trainer_args
__SCREAMING_SNAKE_CASE = 1_2_8 # default number of calibration samples
def get_calib_dataloader(self, calib_dataset=None):
    if calib_dataset is None and self.calib_dataset is None:
        raise ValueError("Trainer: calibration requires an calib_dataset." )
    calib_dataset = calib_dataset if calib_dataset is not None else self.calib_dataset
    calib_dataset = self._remove_unused_columns(calib_dataset, description="Calibration" )
    return DataLoader(
        calib_dataset , batch_size=self.args.eval_batch_size , collate_fn=self.data_collator , drop_last=self.args.dataloader_drop_last , num_workers=self.args.dataloader_num_workers , pin_memory=self.args.dataloader_pin_memory , shuffle=False , )
def calibrate(self, calib_dataset=None):
    calib_dataset = self.train_dataset if calib_dataset is None else calib_dataset
    calib_dataloader = self.get_calib_dataloader(calib_dataset )
    model = self.model
    quant_trainer.configure_model(model , self.quant_trainer_args , calib=True )
    model.eval()
    quant_trainer.enable_calibration(model )
    logger.info("***** Running calibration *****" )
    logger.info(F""" Num examples = {self.calib_num}""" )
    logger.info(F""" Batch size = {calib_dataloader.batch_size}""" )
    for step, inputs in enumerate(calib_dataloader ):
        # Prediction step
        loss, logits, labels = self.prediction_step(model , inputs , prediction_loss_only=True )
        if (step + 1) * calib_dataloader.batch_size >= self.calib_num:
            break
    quant_trainer.finish_calibration(model , self.quant_trainer_args )
    self.model = model
def evaluate(self, eval_dataset=None, eval_examples=None, ignore_keys=None, metric_key_prefix: str = "eval"):
__SCREAMING_SNAKE_CASE = self.eval_dataset if eval_dataset is None else eval_dataset
__SCREAMING_SNAKE_CASE = self.get_eval_dataloader(UpperCAmelCase__ )
__SCREAMING_SNAKE_CASE = self.eval_examples if eval_examples is None else eval_examples
# Temporarily disable metric computation, we will do it in the loop here.
__SCREAMING_SNAKE_CASE = self.compute_metrics
__SCREAMING_SNAKE_CASE = None
__SCREAMING_SNAKE_CASE = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
try:
__SCREAMING_SNAKE_CASE = eval_loop(
UpperCAmelCase__ , description="Evaluation" , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=UpperCAmelCase__ , )
finally:
__SCREAMING_SNAKE_CASE = compute_metrics
if self.post_process_function is not None and self.compute_metrics is not None:
__SCREAMING_SNAKE_CASE = self.post_process_function(UpperCAmelCase__ , UpperCAmelCase__ , output.predictions )
__SCREAMING_SNAKE_CASE = self.compute_metrics(UpperCAmelCase__ )
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys() ):
if not key.startswith(F"""{metric_key_prefix}_""" ):
__SCREAMING_SNAKE_CASE = metrics.pop(UpperCAmelCase__ )
self.log(UpperCAmelCase__ )
else:
__SCREAMING_SNAKE_CASE = {}
if self.args.tpu_metrics_debug or self.args.debug:
# tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
xm.master_print(met.metrics_report() )
__SCREAMING_SNAKE_CASE = self.callback_handler.on_evaluate(self.args , self.state , self.control , UpperCAmelCase__ )
return metrics
def predict(self, predict_dataset, predict_examples, ignore_keys=None, metric_key_prefix: str = "test"):
__SCREAMING_SNAKE_CASE = self.get_test_dataloader(UpperCAmelCase__ )
# Temporarily disable metric computation, we will do it in the loop here.
__SCREAMING_SNAKE_CASE = self.compute_metrics
__SCREAMING_SNAKE_CASE = None
__SCREAMING_SNAKE_CASE = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
try:
__SCREAMING_SNAKE_CASE = eval_loop(
UpperCAmelCase__ , description="Prediction" , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=UpperCAmelCase__ , )
finally:
__SCREAMING_SNAKE_CASE = compute_metrics
if self.post_process_function is None or self.compute_metrics is None:
return output
__SCREAMING_SNAKE_CASE = self.post_process_function(UpperCAmelCase__ , UpperCAmelCase__ , output.predictions , "predict" )
__SCREAMING_SNAKE_CASE = self.compute_metrics(UpperCAmelCase__ )
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys() ):
if not key.startswith(F"""{metric_key_prefix}_""" ):
__SCREAMING_SNAKE_CASE = metrics.pop(UpperCAmelCase__ )
return PredictionOutput(predictions=predictions.predictions , label_ids=predictions.label_ids , metrics=UpperCAmelCase__ )
def UpperCAmelCase_ ( self : int , UpperCAmelCase__ : int="./" ) -> Optional[int]:
__SCREAMING_SNAKE_CASE = self.eval_dataset
__SCREAMING_SNAKE_CASE = self.get_eval_dataloader(UpperCAmelCase__ )
__SCREAMING_SNAKE_CASE = next(iter(UpperCAmelCase__ ) )
# saving device - to make it consistent
__SCREAMING_SNAKE_CASE = torch.device("cuda" if torch.cuda.is_available() else "cpu" )
# convert to tuple
__SCREAMING_SNAKE_CASE = tuple(v.to(UpperCAmelCase__ ) for k, v in batch.items() )
logger.info("Converting model to be onnx compatible" )
from pytorch_quantization.nn import TensorQuantizer
__SCREAMING_SNAKE_CASE = True
__SCREAMING_SNAKE_CASE = self.model.to(UpperCAmelCase__ )
model.eval()
model.float()
__SCREAMING_SNAKE_CASE = model.module if hasattr(UpperCAmelCase__ , "module" ) else model
quant_trainer.configure_model(UpperCAmelCase__ , self.quant_trainer_args )
__SCREAMING_SNAKE_CASE = os.path.join(UpperCAmelCase__ , "model.onnx" )
logger.info(F"""exporting model to {output_model_file}""" )
__SCREAMING_SNAKE_CASE = {0: "batch_size", 1: "seq_len"}
torch.onnx.export(
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , export_params=UpperCAmelCase__ , opset_version=1_3 , do_constant_folding=UpperCAmelCase__ , input_names=["input_ids", "attention_mask", "token_type_ids"] , output_names=["output_start_logits", "output_end_logits"] , dynamic_axes={
"input_ids": axes,
"attention_mask": axes,
"token_type_ids": axes,
"output_start_logits": axes,
"output_end_logits": axes,
} , verbose=UpperCAmelCase__ , )
logger.info("onnx export finished" )
| 553
|
"""simple docstring"""
import math
from typing import Dict, Iterable, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
get_image_size,
is_torch_available,
is_torch_tensor,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_torch_available():
import torch
if is_vision_available():
import PIL
a__ : Dict = logging.get_logger(__name__)
def get_resize_output_image_size(input_image, output_size, keep_aspect_ratio, multiple):
    '''simple docstring'''

    def constraint_to_multiple_of(val, multiple, min_val=0, max_val=None):
        x = round(val / multiple) * multiple
        if max_val is not None and x > max_val:
            x = math.floor(val / multiple) * multiple
        if x < min_val:
            x = math.ceil(val / multiple) * multiple
        return x

    output_size = (output_size, output_size) if isinstance(output_size, int) else output_size
    input_height, input_width = get_image_size(input_image)
    output_height, output_width = output_size
    # determine new height and width
    scale_height = output_height / input_height
    scale_width = output_width / input_width
    if keep_aspect_ratio:
        # scale as little as possible
        if abs(1 - scale_width) < abs(1 - scale_height):
            # fit width
            scale_height = scale_width
        else:
            # fit height
            scale_width = scale_height
    new_height = constraint_to_multiple_of(scale_height * input_height, multiple=multiple)
    new_width = constraint_to_multiple_of(scale_width * input_width, multiple=multiple)
    return (new_height, new_width)
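# Hand-worked example of the helper above (numbers are illustrative): for a 480x640 input and a
# 384x384 target with keep_aspect_ratio=True and multiple=32, scale_height = 0.8 and
# scale_width = 0.6; fitting the height changes the image least, so both sides are scaled by 0.8
# and snapped to multiples of 32, giving an output size of (384, 512).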
class UpperCamelCase_ ( UpperCamelCase):
"""simple docstring"""
snake_case__ : List[Any] = ["pixel_values"]
def __init__( self : int , UpperCAmelCase__ : bool = True , UpperCAmelCase__ : Dict[str, int] = None , UpperCAmelCase__ : PILImageResampling = PILImageResampling.BILINEAR , UpperCAmelCase__ : bool = False , UpperCAmelCase__ : int = 1 , UpperCAmelCase__ : bool = True , UpperCAmelCase__ : Union[int, float] = 1 / 2_5_5 , UpperCAmelCase__ : bool = True , UpperCAmelCase__ : Optional[Union[float, List[float]]] = None , UpperCAmelCase__ : Optional[Union[float, List[float]]] = None , **UpperCAmelCase__ : Optional[Any] , ) -> None:
super().__init__(**UpperCAmelCase__ )
__SCREAMING_SNAKE_CASE = size if size is not None else {"height": 3_8_4, "width": 3_8_4}
__SCREAMING_SNAKE_CASE = get_size_dict(UpperCAmelCase__ )
__SCREAMING_SNAKE_CASE = do_resize
__SCREAMING_SNAKE_CASE = size
__SCREAMING_SNAKE_CASE = keep_aspect_ratio
__SCREAMING_SNAKE_CASE = ensure_multiple_of
__SCREAMING_SNAKE_CASE = resample
__SCREAMING_SNAKE_CASE = do_rescale
__SCREAMING_SNAKE_CASE = rescale_factor
__SCREAMING_SNAKE_CASE = do_normalize
__SCREAMING_SNAKE_CASE = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
__SCREAMING_SNAKE_CASE = image_std if image_std is not None else IMAGENET_STANDARD_STD
def UpperCAmelCase_ ( self : Any , UpperCAmelCase__ : np.ndarray , UpperCAmelCase__ : Dict[str, int] , UpperCAmelCase__ : bool = False , UpperCAmelCase__ : int = 1 , UpperCAmelCase__ : PILImageResampling = PILImageResampling.BICUBIC , UpperCAmelCase__ : Optional[Union[str, ChannelDimension]] = None , **UpperCAmelCase__ : Dict , ) -> np.ndarray:
__SCREAMING_SNAKE_CASE = get_size_dict(UpperCAmelCase__ )
if "height" not in size or "width" not in size:
raise ValueError(F"""The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}""" )
__SCREAMING_SNAKE_CASE = get_resize_output_image_size(
UpperCAmelCase__ , output_size=(size["height"], size["width"]) , keep_aspect_ratio=UpperCAmelCase__ , multiple=UpperCAmelCase__ , )
return resize(UpperCAmelCase__ , size=UpperCAmelCase__ , resample=UpperCAmelCase__ , data_format=UpperCAmelCase__ , **UpperCAmelCase__ )
def UpperCAmelCase_ ( self : Tuple , UpperCAmelCase__ : np.ndarray , UpperCAmelCase__ : Union[int, float] , UpperCAmelCase__ : Optional[Union[str, ChannelDimension]] = None , **UpperCAmelCase__ : Union[str, Any] , ) -> Any:
return rescale(UpperCAmelCase__ , scale=UpperCAmelCase__ , data_format=UpperCAmelCase__ , **UpperCAmelCase__ )
def UpperCAmelCase_ ( self : str , UpperCAmelCase__ : np.ndarray , UpperCAmelCase__ : Union[float, List[float]] , UpperCAmelCase__ : Union[float, List[float]] , UpperCAmelCase__ : Optional[Union[str, ChannelDimension]] = None , **UpperCAmelCase__ : Optional[Any] , ) -> np.ndarray:
return normalize(UpperCAmelCase__ , mean=UpperCAmelCase__ , std=UpperCAmelCase__ , data_format=UpperCAmelCase__ , **UpperCAmelCase__ )
def UpperCAmelCase_ ( self : List[Any] , UpperCAmelCase__ : ImageInput , UpperCAmelCase__ : bool = None , UpperCAmelCase__ : int = None , UpperCAmelCase__ : bool = None , UpperCAmelCase__ : int = None , UpperCAmelCase__ : PILImageResampling = None , UpperCAmelCase__ : bool = None , UpperCAmelCase__ : float = None , UpperCAmelCase__ : bool = None , UpperCAmelCase__ : Optional[Union[float, List[float]]] = None , UpperCAmelCase__ : Optional[Union[float, List[float]]] = None , UpperCAmelCase__ : Optional[Union[str, TensorType]] = None , UpperCAmelCase__ : ChannelDimension = ChannelDimension.FIRST , **UpperCAmelCase__ : int , ) -> PIL.Image.Image:
__SCREAMING_SNAKE_CASE = do_resize if do_resize is not None else self.do_resize
__SCREAMING_SNAKE_CASE = size if size is not None else self.size
__SCREAMING_SNAKE_CASE = get_size_dict(UpperCAmelCase__ )
__SCREAMING_SNAKE_CASE = keep_aspect_ratio if keep_aspect_ratio is not None else self.keep_aspect_ratio
__SCREAMING_SNAKE_CASE = ensure_multiple_of if ensure_multiple_of is not None else self.ensure_multiple_of
__SCREAMING_SNAKE_CASE = resample if resample is not None else self.resample
__SCREAMING_SNAKE_CASE = do_rescale if do_rescale is not None else self.do_rescale
__SCREAMING_SNAKE_CASE = rescale_factor if rescale_factor is not None else self.rescale_factor
__SCREAMING_SNAKE_CASE = do_normalize if do_normalize is not None else self.do_normalize
__SCREAMING_SNAKE_CASE = image_mean if image_mean is not None else self.image_mean
__SCREAMING_SNAKE_CASE = image_std if image_std is not None else self.image_std
__SCREAMING_SNAKE_CASE = make_list_of_images(UpperCAmelCase__ )
if not valid_images(UpperCAmelCase__ ):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray." )
if do_resize and (size is None or resample is None):
raise ValueError("Size and resample must be specified if do_resize is True." )
if do_rescale and rescale_factor is None:
raise ValueError("Rescale factor must be specified if do_rescale is True." )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("Image mean and std must be specified if do_normalize is True." )
# All transformations expect numpy arrays.
__SCREAMING_SNAKE_CASE = [to_numpy_array(UpperCAmelCase__ ) for image in images]
if do_resize:
__SCREAMING_SNAKE_CASE = [self.resize(image=UpperCAmelCase__ , size=UpperCAmelCase__ , resample=UpperCAmelCase__ ) for image in images]
if do_rescale:
__SCREAMING_SNAKE_CASE = [self.rescale(image=UpperCAmelCase__ , scale=UpperCAmelCase__ ) for image in images]
if do_normalize:
__SCREAMING_SNAKE_CASE = [self.normalize(image=UpperCAmelCase__ , mean=UpperCAmelCase__ , std=UpperCAmelCase__ ) for image in images]
__SCREAMING_SNAKE_CASE = [to_channel_dimension_format(UpperCAmelCase__ , UpperCAmelCase__ ) for image in images]
__SCREAMING_SNAKE_CASE = {"pixel_values": images}
return BatchFeature(data=UpperCAmelCase__ , tensor_type=UpperCAmelCase__ )
def post_process_semantic_segmentation(self, outputs, target_sizes=None):
    logits = outputs.logits
    # Resize logits and compute semantic segmentation maps
    if target_sizes is not None:
        if len(logits) != len(target_sizes):
            raise ValueError(
                "Make sure that you pass in as many target sizes as the batch dimension of the logits" )
        if is_torch_tensor(target_sizes):
            target_sizes = target_sizes.numpy()
        semantic_segmentation = []
        for idx in range(len(target_sizes)):
            resized_logits = torch.nn.functional.interpolate(
                logits[idx].unsqueeze(dim=0), size=target_sizes[idx], mode="bilinear", align_corners=False )
            semantic_map = resized_logits[0].argmax(dim=0)
            semantic_segmentation.append(semantic_map)
    else:
        semantic_segmentation = logits.argmax(dim=1)
        semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]
    return semantic_segmentation
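# Hedged usage sketch for the post-processing above (model and variable names are assumptions):
#   inputs = image_processor(image, return_tensors="pt")
#   outputs = segmentation_model(**inputs)   # e.g. a DPT-style semantic-segmentation head
#   maps = image_processor.post_process_semantic_segmentation(outputs, target_sizes=[image.size[::-1]])
# Each entry of `maps` is a (height, width) tensor of per-pixel class indices.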
| 553
| 1
|
'''simple docstring'''
import logging
import re
import pytorch_quantization
import pytorch_quantization.nn as quant_nn
import torch
from pytorch_quantization import calib
from pytorch_quantization.tensor_quant import QuantDescriptor
logger = logging.getLogger(__name__)
name_width = 50  # max width of layer names
qname_width = 70  # max width of quantizer names
def add_arguments(parser):
"""simple docstring"""
lowerCAmelCase__ : int = parser.add_argument_group("quant_trainer arguments" )
group.add_argument("--wprec" , type=__lowercase , default=8 , help="weight precision" )
group.add_argument("--aprec" , type=__lowercase , default=8 , help="activation precision" )
group.add_argument("--quant-per-tensor" , action="store_true" , help="per tensor weight scaling" )
group.add_argument("--quant-disable" , action="store_true" , help="disable all quantizers" )
group.add_argument("--quant-disable-embeddings" , action="store_true" , help="disable all embeddings quantizers" )
group.add_argument("--quant-disable-keyword" , type=__lowercase , nargs="+" , help="disable quantizers by keyword" )
group.add_argument("--quant-disable-layer-module" , type=__lowercase , help="disable quantizers by keyword under layer." )
group.add_argument("--quant-enable-layer-module" , type=__lowercase , help="enable quantizers by keyword under layer" )
group.add_argument("--calibrator" , default="max" , help="which quantization range calibrator to use" )
group.add_argument("--percentile" , default=__lowercase , type=__lowercase , help="percentile for PercentileCalibrator" )
group.add_argument("--fuse-qkv" , action="store_true" , help="use the same scale factor for qkv" )
group.add_argument("--clip-gelu" , metavar="N" , type=__lowercase , help="clip gelu output maximum value to N" )
group.add_argument(
"--recalibrate-weights" , action="store_true" , help=(
"recalibrate weight amaxes by taking the max of the weights."
" amaxes will be computed with the current quantization granularity (axis)."
) , )
def set_default_quantizers(args):
"""simple docstring"""
if args.calibrator == "max":
lowerCAmelCase__ : Union[str, Any] = "max"
elif args.calibrator == "percentile":
if args.percentile is None:
raise ValueError("Specify --percentile when using percentile calibrator" )
lowerCAmelCase__ : Optional[int] = "histogram"
elif args.calibrator == "mse":
lowerCAmelCase__ : List[str] = "histogram"
else:
raise ValueError(f'''Invalid calibrator {args.calibrator}''' )
lowerCAmelCase__ : str = QuantDescriptor(num_bits=args.aprec , calib_method=__lowercase )
lowerCAmelCase__ : Union[str, Any] = QuantDescriptor(num_bits=args.wprec , axis=(None if args.quant_per_tensor else (0,)) )
quant_nn.QuantLinear.set_default_quant_desc_input(__lowercase )
quant_nn.QuantLinear.set_default_quant_desc_weight(__lowercase )
def configure_model(model, args, calib=False, eval=False):
"""simple docstring"""
logger.info("Configuring Model for Quantization" )
logger.info(f'''using quantization package {pytorch_quantization.__file__}''' )
if not calib:
if args.quant_disable_embeddings:
set_quantizer_by_name(__lowercase , ["embeddings"] , which="weight" , _disabled=__lowercase )
if args.quant_disable:
set_quantizer_by_name(__lowercase , [""] , _disabled=__lowercase )
if args.quant_disable_keyword:
set_quantizer_by_name(__lowercase , args.quant_disable_keyword , _disabled=__lowercase )
if args.quant_disable_layer_module:
set_quantizer_by_name(__lowercase , [R"layer.\d+." + args.quant_disable_layer_module] , _disabled=__lowercase )
if args.quant_enable_layer_module:
set_quantizer_by_name(__lowercase , [R"layer.\d+." + args.quant_enable_layer_module] , _disabled=__lowercase )
if args.recalibrate_weights:
recalibrate_weights(__lowercase )
if args.fuse_qkv:
fuse_qkv(__lowercase , __lowercase )
if args.clip_gelu:
clip_gelu(__lowercase , args.clip_gelu )
# if args.local_rank in [-1, 0] and not calib:
print_quant_summary(__lowercase )
def enable_calibration(model):
"""simple docstring"""
logger.info("Enabling Calibration" )
for name, module in model.named_modules():
if name.endswith("_quantizer" ):
if module._calibrator is not None:
module.disable_quant()
module.enable_calib()
else:
module.disable()
logger.info(f'''{name:80}: {module}''' )
def finish_calibration(model, args):
"""simple docstring"""
logger.info("Loading calibrated amax" )
for name, module in model.named_modules():
if name.endswith("_quantizer" ):
if module._calibrator is not None:
if isinstance(module._calibrator , calib.MaxCalibrator ):
module.load_calib_amax()
else:
module.load_calib_amax("percentile" , percentile=args.percentile )
module.enable_quant()
module.disable_calib()
else:
module.enable()
model.cuda()
print_quant_summary(__lowercase )
def fuse_qkv(model, args):
"""simple docstring"""
def fusea(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ):
for mod in [qq, qk, qv]:
if not hasattr(__lowercase , "_amax" ):
print(" WARNING: NO AMAX BUFFER" )
return
q = qq._amax.detach().item()
k = qk._amax.detach().item()
v = qv._amax.detach().item()
amax = max(q , k , v )
qq._amax.fill_(amax )
qk._amax.fill_(amax )
qv._amax.fill_(amax )
logger.info(f''' q={q:5.2f} k={k:5.2f} v={v:5.2f} -> {amax:5.2f}''' )
for name, mod in model.named_modules():
if name.endswith(".attention.self" ):
logger.info(f'''FUSE_QKV: {name:{name_width}}''' )
fusea(mod.matmul_q_input_quantizer , mod.matmul_k_input_quantizer , mod.matmul_v_input_quantizer )
if args.quant_per_tensor:
fusea(mod.query._weight_quantizer , mod.key._weight_quantizer , mod.value._weight_quantizer )
def clip_gelu(model, maxval):
    """simple docstring"""
    for name, mod in model.named_modules():
        if name.endswith(".output.dense" ) and not name.endswith("attention.output.dense" ):
            amax_init = mod._input_quantizer._amax.data.detach().item()
            mod._input_quantizer._amax.data.detach().clamp_(max=maxval )
            amax = mod._input_quantizer._amax.data.detach().item()
            logger.info(f'''CLIP_GELU: {name:{name_width}} amax: {amax_init:5.2f} -> {amax:5.2f}''' )
def UpperCAmelCase_ ( lowerCamelCase_ ):
"""simple docstring"""
for name, mod in model.named_modules():
if hasattr(__lowercase , "_weight_quantizer" ) and mod._weight_quantizer.axis is not None:
lowerCAmelCase__ : Optional[Any] = mod.weight.shape[0]
lowerCAmelCase__ : Tuple = mod._weight_quantizer._amax.detach()
lowerCAmelCase__ : Dict = torch.ones(__lowercase , dtype=amax.dtype , device=amax.device ) * amax
print(f'''expanding {name} {amax} -> {mod._weight_quantizer._amax}''' )
def recalibrate_weights(model):
"""simple docstring"""
for name, mod in model.named_modules():
if hasattr(__lowercase , "_weight_quantizer" ):
if not hasattr(mod.weight_quantizer , "_amax" ):
print("RECALIB: {name:{name_width}} WARNING: NO AMAX BUFFER" )
continue
# determine which axes to reduce across
# e.g. a 4D tensor quantized per axis 0 should reduce over (1,2,3)
axis_set = set() if mod._weight_quantizer.axis is None else set(mod._weight_quantizer.axis )
reduce_axis = set(range(len(mod.weight.size() ) ) ) - axis_set
amax = pytorch_quantization.utils.reduce_amax(mod.weight , axis=reduce_axis , keepdims=True ).detach()
logger.info(f'''RECALIB: {name:{name_width}} {mod._weight_quantizer._amax.flatten()} -> {amax.flatten()}''' )
mod._weight_quantizer._amax = amax
def UpperCAmelCase_ ( lowerCamelCase_ , lowerCamelCase_=2_5 , lowerCamelCase_=1_8_0 , lowerCamelCase_=None ):
"""simple docstring"""
if ignore is None:
lowerCAmelCase__ : Any = []
elif not isinstance(__lowercase , __lowercase ):
lowerCAmelCase__ : Any = [ignore]
lowerCAmelCase__ : str = 0
for name, mod in model.named_modules():
if not hasattr(__lowercase , "weight" ):
continue
lowerCAmelCase__ : Any = max(__lowercase , len(__lowercase ) )
for name, mod in model.named_modules():
lowerCAmelCase__ : Any = getattr(__lowercase , "_input_quantizer" , __lowercase )
lowerCAmelCase__ : int = getattr(__lowercase , "_weight_quantizer" , __lowercase )
if not hasattr(__lowercase , "weight" ):
continue
if type(__lowercase ) in ignore:
continue
if [True for s in ignore if type(__lowercase ) is str and s in name]:
continue
lowerCAmelCase__ : List[Any] = f'''Act:{input_q.extra_repr()}'''
lowerCAmelCase__ : Tuple = f'''Wgt:{weight_q.extra_repr()}'''
lowerCAmelCase__ : Optional[int] = f'''{name:{name_width}} {act_str} {wgt_str}'''
if len(__lowercase ) <= line_width:
logger.info(__lowercase )
else:
logger.info(f'''{name:{name_width}} {act_str}''' )
logger.info(f'''{' ':{name_width}} {wgt_str}''' )
def print_quant_summary(model):
"""simple docstring"""
lowerCAmelCase__ : List[str] = 0
for name, mod in model.named_modules():
if isinstance(__lowercase , pytorch_quantization.nn.TensorQuantizer ):
print(f'''{name:80} {mod}''' )
count += 1
print(f'''{count} TensorQuantizers found in model''' )
def set_quantizer(name, mod, quantizer, k, v):
    """simple docstring"""
    quantizer_mod = getattr(mod , quantizer , None )
    if quantizer_mod is not None:
        assert hasattr(quantizer_mod , k )
        setattr(quantizer_mod , k , v )
    else:
        logger.warning(f'''{name} has no {quantizer}''' )
def UpperCAmelCase_ ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_="both" , **lowerCamelCase_ ):
"""simple docstring"""
lowerCAmelCase__ : Any = f'''Warning: changing {which} quantizers of {name:{qname_width}}'''
for k, v in kwargs.items():
s += f''' {k}={v}'''
if which in ["input", "both"]:
set_quantizer(__lowercase , __lowercase , "_input_quantizer" , __lowercase , __lowercase )
if which in ["weight", "both"]:
set_quantizer(__lowercase , __lowercase , "_weight_quantizer" , __lowercase , __lowercase )
logger.info(__lowercase )
def set_quantizer_by_name(model, names, **kwargs):
    """simple docstring"""
    for name, mod in model.named_modules():
        if hasattr(mod , "_input_quantizer" ) or hasattr(mod , "_weight_quantizer" ):
            for n in names:
                if re.search(n , name ):
                    set_quantizers(name , mod , **kwargs )
        elif name.endswith("_quantizer" ):
            for n in names:
                if re.search(n , name ):
                    s = f'''Warning: changing {name:{name_width}}'''
                    for k, v in kwargs.items():
                        s += f''' {k}={v}'''
                        setattr(mod , k , v )
                    logger.info(s )
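# Hedged example of the keyword-matching helper above: a call such as
#   set_quantizer_by_name(model, ["LayerNorm"], _disabled=True)
# walks model.named_modules(), matches each module name against the given patterns with
# re.search, and sets the attribute on the input and weight quantizers via set_quantizers().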
| 378
|
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
import torch
from ..models.auto import AutoModelForVisualQuestionAnswering, AutoProcessor
from ..utils import requires_backends
from .base import PipelineTool
if TYPE_CHECKING:
from PIL import Image
class A_ ( lowerCAmelCase_ ):
_lowerCamelCase : Dict = """dandelin/vilt-b32-finetuned-vqa"""
_lowerCamelCase : List[Any] = (
"""This is a tool that answers a question about an image. It takes an input named `image` which should be the """
"""image containing the information, as well as a `question` which should be the question in English. It """
"""returns a text that is the answer to the question."""
)
_lowerCamelCase : str = """image_qa"""
_lowerCamelCase : List[str] = AutoProcessor
_lowerCamelCase : Optional[int] = AutoModelForVisualQuestionAnswering
_lowerCamelCase : Tuple = ["""image""", """text"""]
_lowerCamelCase : Optional[int] = ["""text"""]
def __init__( self : int , *snake_case_ : Tuple , **snake_case_ : Optional[int] ):
requires_backends(self , ["vision"] )
super().__init__(*snake_case_ , **snake_case_ )
def lowercase ( self : List[Any] , snake_case_ : "Image" , snake_case_ : str ):
return self.pre_processor(snake_case_ , snake_case_ , return_tensors="pt" )
def lowercase ( self : List[Any] , snake_case_ : Dict ):
with torch.no_grad():
return self.model(**snake_case_ ).logits
def lowercase ( self : int , snake_case_ : Any ):
idx = outputs.argmax(-1 ).item()
return self.model.config.idalabel[idx]
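# Hypothetical usage sketch for the tool above (the concrete tool class name and the prompt are
# assumptions):
#   tool = ImageQuestionAnsweringTool()
#   answer = tool(image=Image.open("photo.png"), text="How many cats are there?")
# encode() builds the ViLT inputs, forward() returns the VQA logits under torch.no_grad(),
# and decode() maps the argmax index back to a label string.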
| 236
| 0
|
'''simple docstring'''
from __future__ import annotations
import numpy as np
from numpy import floataa
from numpy.typing import NDArray
def jacobi_iteration_method( coefficient_matrix : NDArray[floataa] , constant_matrix : NDArray[floataa] , init_val : list[int] , iterations : int , ) -> list[float]:
"""simple docstring"""
UpperCamelCase_ : Optional[int] = coefficient_matrix.shape
UpperCamelCase_ : Optional[Any] = constant_matrix.shape
if rowsa != colsa:
UpperCamelCase_ : List[Any] = f"Coefficient matrix dimensions must be nxn but received {rowsa}x{colsa}"
raise ValueError(a_ )
if colsa != 1:
UpperCamelCase_ : Dict = f"Constant matrix must be nx1 but received {rowsa}x{colsa}"
raise ValueError(a_ )
if rowsa != rowsa:
UpperCamelCase_ : Dict = (
"""Coefficient and constant matrices dimensions must be nxn and nx1 but """
f"received {rowsa}x{colsa} and {rowsa}x{colsa}"
)
raise ValueError(a_ )
if len(a_ ) != rowsa:
UpperCamelCase_ : Dict = (
"""Number of initial values must be equal to number of rows in coefficient """
f"matrix but received {len(a_ )} and {rowsa}"
)
raise ValueError(a_ )
if iterations <= 0:
raise ValueError("""Iterations must be at least 1""" )
UpperCamelCase_ : NDArray[floataa] = np.concatenate(
(coefficient_matrix, constant_matrix) , axis=1 )
UpperCamelCase_ : Any = table.shape
strictly_diagonally_dominant(a_ )
# Iterates the whole matrix for given number of times
for _ in range(a_ ):
UpperCamelCase_ : Dict = []
for row in range(a_ ):
UpperCamelCase_ : int = 0
for col in range(a_ ):
if col == row:
UpperCamelCase_ : Any = table[row][col]
elif col == cols - 1:
UpperCamelCase_ : int = table[row][col]
else:
temp += (-1) * table[row][col] * init_val[col]
UpperCamelCase_ : Optional[int] = (temp + val) / denom
new_val.append(a_ )
UpperCamelCase_ : Dict = new_val
return [float(a_ ) for i in new_val]
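# Worked sketch of one Jacobi sweep (an illustrative 2x2 system, not from this file): for
# 4x + y = 2 and x + 3y = -6 with init_val = [0.5, -0.5], each iteration computes
# x = (2 - y_prev) / 4 and y = (-6 - x_prev) / 3, which converges toward the exact solution
# x ~ 1.09, y ~ -2.36 because the coefficient matrix is strictly diagonally dominant.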
def strictly_diagonally_dominant( table : NDArray[floataa] ) -> bool:
    """simple docstring"""
    rows, cols = table.shape
    is_diagonally_dominant = True
    for i in range(0 , rows ):
        total = 0
        for j in range(0 , cols - 1 ):
            if i == j:
                continue
            else:
                total += table[i][j]
        if table[i][i] <= total:
            raise ValueError("""Coefficient matrix is not strictly diagonally dominant""" )
    return is_diagonally_dominant
# Test Cases
if __name__ == "__main__":
import doctest
doctest.testmod()
| 714
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
"configuration_data2vec_audio": ["DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP", "Data2VecAudioConfig"],
"configuration_data2vec_text": [
"DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP",
"Data2VecTextConfig",
"Data2VecTextOnnxConfig",
],
"configuration_data2vec_vision": [
"DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP",
"Data2VecVisionConfig",
"Data2VecVisionOnnxConfig",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase =[
"DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST",
"Data2VecAudioForAudioFrameClassification",
"Data2VecAudioForCTC",
"Data2VecAudioForSequenceClassification",
"Data2VecAudioForXVector",
"Data2VecAudioModel",
"Data2VecAudioPreTrainedModel",
]
UpperCamelCase =[
"DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
"Data2VecTextForCausalLM",
"Data2VecTextForMaskedLM",
"Data2VecTextForMultipleChoice",
"Data2VecTextForQuestionAnswering",
"Data2VecTextForSequenceClassification",
"Data2VecTextForTokenClassification",
"Data2VecTextModel",
"Data2VecTextPreTrainedModel",
]
UpperCamelCase =[
"DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST",
"Data2VecVisionForImageClassification",
"Data2VecVisionForMaskedImageModeling",
"Data2VecVisionForSemanticSegmentation",
"Data2VecVisionModel",
"Data2VecVisionPreTrainedModel",
]
if is_tf_available():
UpperCamelCase =[
"TFData2VecVisionForImageClassification",
"TFData2VecVisionForSemanticSegmentation",
"TFData2VecVisionModel",
"TFData2VecVisionPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_dataavec_audio import DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP, DataaVecAudioConfig
from .configuration_dataavec_text import (
DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP,
DataaVecTextConfig,
DataaVecTextOnnxConfig,
)
from .configuration_dataavec_vision import (
DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP,
DataaVecVisionConfig,
DataaVecVisionOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_dataavec_audio import (
DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST,
DataaVecAudioForAudioFrameClassification,
DataaVecAudioForCTC,
DataaVecAudioForSequenceClassification,
DataaVecAudioForXVector,
DataaVecAudioModel,
DataaVecAudioPreTrainedModel,
)
from .modeling_dataavec_text import (
DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
DataaVecTextForCausalLM,
DataaVecTextForMaskedLM,
DataaVecTextForMultipleChoice,
DataaVecTextForQuestionAnswering,
DataaVecTextForSequenceClassification,
DataaVecTextForTokenClassification,
DataaVecTextModel,
DataaVecTextPreTrainedModel,
)
from .modeling_dataavec_vision import (
DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST,
DataaVecVisionForImageClassification,
DataaVecVisionForMaskedImageModeling,
DataaVecVisionForSemanticSegmentation,
DataaVecVisionModel,
DataaVecVisionPreTrainedModel,
)
if is_tf_available():
from .modeling_tf_dataavec_vision import (
TFDataaVecVisionForImageClassification,
TFDataaVecVisionForSemanticSegmentation,
TFDataaVecVisionModel,
TFDataaVecVisionPreTrainedModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
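# Note (behavioural sketch): at import time only the _import_structure mapping above is built;
# _LazyModule then stands in for this module in sys.modules, so the heavy Data2Vec modeling
# submodules are only imported when one of their attributes is first accessed.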
| 543
| 0
|
"""simple docstring"""
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class lowerCAmelCase__ ( _lowerCamelCase ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE : Any = (DPMSolverSinglestepScheduler,)
_SCREAMING_SNAKE_CASE : Dict = (('''num_inference_steps''', 25),)
def _lowerCAmelCase ( self : Union[str, Any] , **_SCREAMING_SNAKE_CASE : List[str] ) -> Any:
"""simple docstring"""
config = {
'num_train_timesteps': 1_000,
'beta_start': 0.0_0_0_1,
'beta_end': 0.0_2,
'beta_schedule': 'linear',
'solver_order': 2,
'prediction_type': 'epsilon',
'thresholding': False,
'sample_max_value': 1.0,
'algorithm_type': 'dpmsolver++',
'solver_type': 'midpoint',
'lambda_min_clipped': -float('inf' ),
'variance_type': None,
}
config.update(**_SCREAMING_SNAKE_CASE )
return config
def _lowerCAmelCase ( self : Dict , _SCREAMING_SNAKE_CASE : Union[str, Any]=0 , **_SCREAMING_SNAKE_CASE : int ) -> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[Any] = dict(self.forward_default_kwargs )
SCREAMING_SNAKE_CASE : Union[str, Any] = kwargs.pop('num_inference_steps' , _SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE : List[str] = self.dummy_sample
SCREAMING_SNAKE_CASE : Dict = 0.1 * sample
SCREAMING_SNAKE_CASE : List[Any] = [residual + 0.2, residual + 0.1_5, residual + 0.1_0]
for scheduler_class in self.scheduler_classes:
SCREAMING_SNAKE_CASE : int = self.get_scheduler_config(**_SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE : List[str] = scheduler_class(**_SCREAMING_SNAKE_CASE )
scheduler.set_timesteps(_SCREAMING_SNAKE_CASE )
# copy over dummy past residuals
SCREAMING_SNAKE_CASE : Optional[int] = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(_SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE : Optional[int] = scheduler_class.from_pretrained(_SCREAMING_SNAKE_CASE )
new_scheduler.set_timesteps(_SCREAMING_SNAKE_CASE )
# copy over dummy past residuals
SCREAMING_SNAKE_CASE : Tuple = dummy_past_residuals[: new_scheduler.config.solver_order]
output , new_output = sample, sample
for t in range(_SCREAMING_SNAKE_CASE , time_step + scheduler.config.solver_order + 1 ):
output = scheduler.step(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ).prev_sample
new_output = new_scheduler.step(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def _lowerCAmelCase ( self : Optional[Any] ) -> Optional[int]:
"""simple docstring"""
pass
def _lowerCAmelCase ( self : Optional[Any] , _SCREAMING_SNAKE_CASE : Optional[Any]=0 , **_SCREAMING_SNAKE_CASE : Optional[Any] ) -> List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Dict = dict(self.forward_default_kwargs )
SCREAMING_SNAKE_CASE : Optional[int] = kwargs.pop('num_inference_steps' , _SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE : Any = self.dummy_sample
SCREAMING_SNAKE_CASE : Union[str, Any] = 0.1 * sample
SCREAMING_SNAKE_CASE : List[str] = [residual + 0.2, residual + 0.1_5, residual + 0.1_0]
for scheduler_class in self.scheduler_classes:
SCREAMING_SNAKE_CASE : Optional[int] = self.get_scheduler_config()
SCREAMING_SNAKE_CASE : Tuple = scheduler_class(**_SCREAMING_SNAKE_CASE )
scheduler.set_timesteps(_SCREAMING_SNAKE_CASE )
# copy over dummy past residuals (must be after setting timesteps)
SCREAMING_SNAKE_CASE : str = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(_SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE : Dict = scheduler_class.from_pretrained(_SCREAMING_SNAKE_CASE )
# copy over dummy past residuals
new_scheduler.set_timesteps(_SCREAMING_SNAKE_CASE )
# copy over dummy past residual (must be after setting timesteps)
SCREAMING_SNAKE_CASE : List[Any] = dummy_past_residuals[: new_scheduler.config.solver_order]
output = scheduler.step(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ).prev_sample
new_output = new_scheduler.step(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def _lowerCAmelCase ( self : Optional[int] , _SCREAMING_SNAKE_CASE : str=None , **_SCREAMING_SNAKE_CASE : List[Any] ) -> Optional[Any]:
"""simple docstring"""
if scheduler is None:
SCREAMING_SNAKE_CASE : Optional[Any] = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE : Optional[int] = self.get_scheduler_config(**_SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE : int = scheduler_class(**_SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE : Union[str, Any] = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE : List[str] = self.get_scheduler_config(**_SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE : List[str] = scheduler_class(**_SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE : Tuple = 10
SCREAMING_SNAKE_CASE : str = self.dummy_model()
SCREAMING_SNAKE_CASE : Optional[Any] = self.dummy_sample_deter
scheduler.set_timesteps(_SCREAMING_SNAKE_CASE )
for i, t in enumerate(scheduler.timesteps ):
SCREAMING_SNAKE_CASE : List[Any] = model(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE : Optional[Any] = scheduler.step(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ).prev_sample
return sample
def _lowerCAmelCase ( self : List[Any] ) -> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[Any] = DPMSolverSinglestepScheduler(**self.get_scheduler_config() )
SCREAMING_SNAKE_CASE : Optional[int] = 50
SCREAMING_SNAKE_CASE : List[str] = self.dummy_model()
SCREAMING_SNAKE_CASE : Optional[Any] = self.dummy_sample_deter
scheduler.set_timesteps(_SCREAMING_SNAKE_CASE )
# make sure that the first t is uneven
for i, t in enumerate(scheduler.timesteps[3:] ):
SCREAMING_SNAKE_CASE : Any = model(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE : List[Any] = scheduler.step(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ).prev_sample
SCREAMING_SNAKE_CASE : Union[str, Any] = torch.mean(torch.abs(_SCREAMING_SNAKE_CASE ) )
assert abs(result_mean.item() - 0.2_5_7_4 ) < 1E-3
def _lowerCAmelCase ( self : List[Any] ) -> Any:
"""simple docstring"""
for timesteps in [25, 50, 100, 999, 1_000]:
self.check_over_configs(num_train_timesteps=_SCREAMING_SNAKE_CASE )
def _lowerCAmelCase ( self : Dict ) -> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[Any] = DPMSolverSinglestepScheduler(**self.get_scheduler_config() )
SCREAMING_SNAKE_CASE : Tuple = self.full_loop(scheduler=_SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE : Any = torch.mean(torch.abs(_SCREAMING_SNAKE_CASE ) )
assert abs(result_mean.item() - 0.2_7_9_1 ) < 1E-3
SCREAMING_SNAKE_CASE : List[Any] = DEISMultistepScheduler.from_config(scheduler.config )
SCREAMING_SNAKE_CASE : int = DPMSolverMultistepScheduler.from_config(scheduler.config )
SCREAMING_SNAKE_CASE : Optional[Any] = UniPCMultistepScheduler.from_config(scheduler.config )
SCREAMING_SNAKE_CASE : Any = DPMSolverSinglestepScheduler.from_config(scheduler.config )
SCREAMING_SNAKE_CASE : Any = self.full_loop(scheduler=_SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE : int = torch.mean(torch.abs(_SCREAMING_SNAKE_CASE ) )
assert abs(result_mean.item() - 0.2_7_9_1 ) < 1E-3
def _lowerCAmelCase ( self : List[Any] ) -> Union[str, Any]:
"""simple docstring"""
self.check_over_configs(thresholding=_SCREAMING_SNAKE_CASE )
for order in [1, 2, 3]:
for solver_type in ["midpoint", "heun"]:
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
thresholding=_SCREAMING_SNAKE_CASE , prediction_type=_SCREAMING_SNAKE_CASE , sample_max_value=_SCREAMING_SNAKE_CASE , algorithm_type='dpmsolver++' , solver_order=_SCREAMING_SNAKE_CASE , solver_type=_SCREAMING_SNAKE_CASE , )
def _lowerCAmelCase ( self : List[str] ) -> Optional[Any]:
"""simple docstring"""
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=_SCREAMING_SNAKE_CASE )
def _lowerCAmelCase ( self : List[str] ) -> Optional[Any]:
"""simple docstring"""
for algorithm_type in ["dpmsolver", "dpmsolver++"]:
for solver_type in ["midpoint", "heun"]:
for order in [1, 2, 3]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
solver_order=_SCREAMING_SNAKE_CASE , solver_type=_SCREAMING_SNAKE_CASE , prediction_type=_SCREAMING_SNAKE_CASE , algorithm_type=_SCREAMING_SNAKE_CASE , )
SCREAMING_SNAKE_CASE : Any = self.full_loop(
solver_order=_SCREAMING_SNAKE_CASE , solver_type=_SCREAMING_SNAKE_CASE , prediction_type=_SCREAMING_SNAKE_CASE , algorithm_type=_SCREAMING_SNAKE_CASE , )
assert not torch.isnan(_SCREAMING_SNAKE_CASE ).any(), "Samples have nan numbers"
def _lowerCAmelCase ( self : str ) -> int:
"""simple docstring"""
self.check_over_configs(lower_order_final=_SCREAMING_SNAKE_CASE )
self.check_over_configs(lower_order_final=_SCREAMING_SNAKE_CASE )
def _lowerCAmelCase ( self : Any ) -> Any:
"""simple docstring"""
self.check_over_configs(lambda_min_clipped=-float('inf' ) )
self.check_over_configs(lambda_min_clipped=-5.1 )
def _lowerCAmelCase ( self : Optional[int] ) -> Dict:
"""simple docstring"""
self.check_over_configs(variance_type=_SCREAMING_SNAKE_CASE )
self.check_over_configs(variance_type='learned_range' )
def _lowerCAmelCase ( self : Optional[Any] ) -> str:
"""simple docstring"""
for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1_000]:
self.check_over_forward(num_inference_steps=_SCREAMING_SNAKE_CASE , time_step=0 )
def _lowerCAmelCase ( self : Any ) -> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Dict = self.full_loop()
SCREAMING_SNAKE_CASE : str = torch.mean(torch.abs(_SCREAMING_SNAKE_CASE ) )
assert abs(result_mean.item() - 0.2_7_9_1 ) < 1E-3
def _lowerCAmelCase ( self : str ) -> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[Any] = self.full_loop(use_karras_sigmas=_SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE : Optional[Any] = torch.mean(torch.abs(_SCREAMING_SNAKE_CASE ) )
assert abs(result_mean.item() - 0.2_2_4_8 ) < 1E-3
def _lowerCAmelCase ( self : Dict ) -> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[int] = self.full_loop(prediction_type='v_prediction' )
SCREAMING_SNAKE_CASE : List[Any] = torch.mean(torch.abs(_SCREAMING_SNAKE_CASE ) )
assert abs(result_mean.item() - 0.1_4_5_3 ) < 1E-3
def _lowerCAmelCase ( self : List[str] ) -> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Dict = self.full_loop(prediction_type='v_prediction' , use_karras_sigmas=_SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE : List[str] = torch.mean(torch.abs(_SCREAMING_SNAKE_CASE ) )
assert abs(result_mean.item() - 0.0_6_4_9 ) < 1E-3
def _lowerCAmelCase ( self : str ) -> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[str] = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE : List[Any] = self.get_scheduler_config(thresholding=_SCREAMING_SNAKE_CASE , dynamic_thresholding_ratio=0 )
SCREAMING_SNAKE_CASE : int = scheduler_class(**_SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE : Any = 10
SCREAMING_SNAKE_CASE : Optional[Any] = self.dummy_model()
SCREAMING_SNAKE_CASE : Any = self.dummy_sample_deter.half()
scheduler.set_timesteps(_SCREAMING_SNAKE_CASE )
for i, t in enumerate(scheduler.timesteps ):
SCREAMING_SNAKE_CASE : str = model(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE : Dict = scheduler.step(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ).prev_sample
assert sample.dtype == torch.floataa
| 265
|
"""simple docstring"""
import argparse
import intel_extension_for_pytorch as ipex
import torch
from diffusers import DPMSolverMultistepScheduler, StableDiffusionPipeline
A_ : List[Any] = argparse.ArgumentParser('Stable Diffusion script with intel optimization', add_help=False)
parser.add_argument('--dpm', action='store_true', help='Enable DPMSolver or not')
parser.add_argument('--steps', default=None, type=int, help='Num inference steps')
A_ : Tuple = parser.parse_args()
A_ : Optional[int] = 'cpu'
A_ : Dict = 'a lovely <dicoo> in red dress and hat, in the snowly and brightly night, with many brighly buildings'
A_ : List[str] = 'path-to-your-trained-model'
A_ : Any = StableDiffusionPipeline.from_pretrained(model_id)
if args.dpm:
A_ : Tuple = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
A_ : Tuple = pipe.to(device)
# to channels last
A_ : List[str] = pipe.unet.to(memory_format=torch.channels_last)
A_ : List[str] = pipe.vae.to(memory_format=torch.channels_last)
A_ : Optional[int] = pipe.text_encoder.to(memory_format=torch.channels_last)
if pipe.requires_safety_checker:
A_ : Union[str, Any] = pipe.safety_checker.to(memory_format=torch.channels_last)
# optimize with ipex
A_ : List[Any] = torch.randn(2, 4, 64, 64)
A_ : int = torch.rand(1) * 999
A_ : int = torch.randn(2, 77, 768)
A_ : Tuple = (sample, timestep, encoder_hidden_status)
try:
A_ : Any = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloataa, inplace=True, sample_input=input_example)
except Exception:
A_ : List[str] = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloataa, inplace=True)
A_ : str = ipex.optimize(pipe.vae.eval(), dtype=torch.bfloataa, inplace=True)
A_ : Optional[int] = ipex.optimize(pipe.text_encoder.eval(), dtype=torch.bfloataa, inplace=True)
if pipe.requires_safety_checker:
A_ : List[str] = ipex.optimize(pipe.safety_checker.eval(), dtype=torch.bfloataa, inplace=True)
# compute
A_ : Union[str, Any] = 666
A_ : Optional[Any] = torch.Generator(device).manual_seed(seed)
A_ : Union[str, Any] = {'generator': generator}
if args.steps is not None:
A_ : int = args.steps
with torch.cpu.amp.autocast(enabled=True, dtype=torch.bfloataa):
A_ : int = pipe(prompt, **generate_kwargs).images[0]
# save image
image.save('generated.png')
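# Note (assumptions about the optimisation path): ipex.optimize() prepares each submodule for
# bfloat16 CPU inference (passing sample_input lets IPEX see the UNet's example shapes), while
# the torch.cpu.amp.autocast(..., dtype=torch.bfloataa) context above is what actually runs the
# pipeline in reduced precision.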
| 265
| 1
|
import logging
from transformers import PretrainedConfig
__lowerCamelCase = logging.getLogger(__name__)
__lowerCamelCase = {
'''bertabs-finetuned-cnndm''': '''https://huggingface.co/remi/bertabs-finetuned-cnndm-extractive-abstractive-summarization/resolve/main/config.json''',
}
class snake_case_ (lowercase__ ):
"""simple docstring"""
_lowerCamelCase = """bertabs"""
def __init__( self ,vocab_size=30522 ,max_pos=512 ,enc_layers=6 ,enc_hidden_size=512 ,enc_heads=8 ,enc_ff_size=512 ,enc_dropout=0.2 ,dec_layers=6 ,dec_hidden_size=768 ,dec_heads=8 ,dec_ff_size=2048 ,dec_dropout=0.2 ,**kwargs ,):
    """simple docstring"""
    super().__init__(**kwargs)
    self.vocab_size = vocab_size
    self.max_pos = max_pos
    self.enc_layers = enc_layers
    self.enc_hidden_size = enc_hidden_size
    self.enc_heads = enc_heads
    self.enc_ff_size = enc_ff_size
    self.enc_dropout = enc_dropout
    self.dec_layers = dec_layers
    self.dec_hidden_size = dec_hidden_size
    self.dec_heads = dec_heads
    self.dec_ff_size = dec_ff_size
    self.dec_dropout = dec_dropout
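# Hypothetical usage sketch (the concrete config class name is an assumption; defaults come
# from the signature above):
#   config = BertAbsConfig(vocab_size=30522, max_pos=512)
#   config.enc_hidden_size, config.dec_hidden_size   # -> (512, 768) with the defaults above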
| 708
|
import operator
def strand_sort(arr, reverse=False, solution=None) -> list:
    '''simple docstring'''
    _operator = operator.lt if reverse else operator.gt
    solution = solution or []
    if not arr:
        return solution
    sublist = [arr.pop(0)]
    for i, item in enumerate(arr):
        if _operator(item, sublist[-1]):
            sublist.append(item)
            arr.pop(i)
    # merging sublist into solution list
    if not solution:
        solution.extend(sublist)
    else:
        while sublist:
            item = sublist.pop(0)
            for i, xx in enumerate(solution):
                if not _operator(item, xx):
                    solution.insert(i, item)
                    break
            else:
                solution.append(item)
    strand_sort(arr, reverse, solution)
    return solution
if __name__ == "__main__":
assert strand_sort([4, 3, 5, 1, 2]) == [1, 2, 3, 4, 5]
assert strand_sort([4, 3, 5, 1, 2], reverse=True) == [5, 4, 3, 2, 1]
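# Additional sketch of the behaviour above: with reverse=False each pass pulls an ascending
# "strand" out of the remaining input (every appended item must compare greater than the
# strand's tail) and merges it into `solution`, so nearly sorted inputs need only a few strands.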
| 455
| 0
|
import math
def is_prime(number : int ) -> bool:
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
# All primes number are in format of 6k +/- 1
for i in range(5 , int(math.sqrt(number ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
def solution(ratio : float = 0.1 ) -> int:
    j = 3
    primes = 3
    while primes / (2 * j - 1) >= ratio:
        for i in range(j * j + j + 1 , (j + 2) * (j + 2) , j + 1 ):
            primes += is_prime(i )
        j += 2
    return j
if __name__ == "__main__":
import doctest
doctest.testmod()
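# Sketch of the counting step above: with current side length j, the next ring of the number
# spiral (side j + 2) has corners j*j + (j+1), j*j + 2*(j+1), j*j + 3*(j+1) and (j+2)**2; only
# the first three can be prime, and after that ring the diagonals hold 2*(j+2) - 1 numbers,
# which is what the primes / (2*j - 1) ratio test checks on the following pass.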
| 651
|
from typing import Callable, Dict, Optional, Tuple
import torch
from torch import nn
from torch.distributions import (
AffineTransform,
Distribution,
Independent,
NegativeBinomial,
Normal,
StudentT,
TransformedDistribution,
)
class SCREAMING_SNAKE_CASE ( a_ ):
"""simple docstring"""
def __init__( self : Optional[Any] , lowerCAmelCase : Distribution , lowerCAmelCase : Tuple=None , lowerCAmelCase : Optional[Any]=None , lowerCAmelCase : Any=0 ) -> Optional[int]:
"""simple docstring"""
__lowerCAmelCase : Optional[int] = 1.0 if scale is None else scale
__lowerCAmelCase : Union[str, Any] = 0.0 if loc is None else loc
super().__init__(lowerCAmelCase , [AffineTransform(loc=self.loc , scale=self.scale , event_dim=lowerCAmelCase )] )
@property
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
return self.base_dist.mean * self.scale + self.loc
@property
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Tuple:
"""simple docstring"""
return self.base_dist.variance * self.scale**2
@property
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Any:
"""simple docstring"""
return self.variance.sqrt()
class SCREAMING_SNAKE_CASE ( nn.Module ):
"""simple docstring"""
def __init__( self : Union[str, Any] , lowerCAmelCase : int , lowerCAmelCase : Dict[str, int] , lowerCAmelCase : Callable[..., Tuple[torch.Tensor]] , **lowerCAmelCase : Union[str, Any] ) -> None:
"""simple docstring"""
super().__init__(**lowerCAmelCase )
__lowerCAmelCase : List[Any] = args_dim
__lowerCAmelCase : Dict = nn.ModuleList([nn.Linear(lowerCAmelCase , lowerCAmelCase ) for dim in args_dim.values()] )
__lowerCAmelCase : int = domain_map
def SCREAMING_SNAKE_CASE ( self : Tuple , lowerCAmelCase : torch.Tensor ) -> Tuple[torch.Tensor]:
"""simple docstring"""
__lowerCAmelCase : List[str] = [proj(lowerCAmelCase ) for proj in self.proj]
return self.domain_map(*lowerCAmelCase )
class SCREAMING_SNAKE_CASE ( nn.Module ):
"""simple docstring"""
def __init__( self : int , lowerCAmelCase : str ) -> Any:
"""simple docstring"""
super().__init__()
__lowerCAmelCase : Optional[Any] = function
def SCREAMING_SNAKE_CASE ( self : Optional[Any] , lowerCAmelCase : str , *lowerCAmelCase : Union[str, Any] ) -> List[str]:
"""simple docstring"""
return self.function(lowerCAmelCase , *lowerCAmelCase )
class SCREAMING_SNAKE_CASE :
"""simple docstring"""
lowerCamelCase : type
lowerCamelCase : int
lowerCamelCase : Dict[str, int]
def __init__( self : Optional[int] , lowerCAmelCase : int = 1 ) -> None:
"""simple docstring"""
__lowerCAmelCase : Tuple = dim
__lowerCAmelCase : List[str] = {k: dim * self.args_dim[k] for k in self.args_dim}
def SCREAMING_SNAKE_CASE ( self : Any , lowerCAmelCase : Dict ) -> Any:
"""simple docstring"""
if self.dim == 1:
return self.distribution_class(*lowerCAmelCase )
else:
return Independent(self.distribution_class(*lowerCAmelCase ) , 1 )
def SCREAMING_SNAKE_CASE ( self : Any , lowerCAmelCase : Optional[int] , lowerCAmelCase : Optional[torch.Tensor] = None , lowerCAmelCase : Optional[torch.Tensor] = None , ) -> Distribution:
"""simple docstring"""
__lowerCAmelCase : int = self._base_distribution(lowerCAmelCase )
if loc is None and scale is None:
return distr
else:
return AffineTransformed(lowerCAmelCase , loc=lowerCAmelCase , scale=lowerCAmelCase , event_dim=self.event_dim )
@property
def SCREAMING_SNAKE_CASE ( self : List[str] ) -> Tuple:
"""simple docstring"""
return () if self.dim == 1 else (self.dim,)
@property
def SCREAMING_SNAKE_CASE ( self : str ) -> int:
"""simple docstring"""
return len(self.event_shape )
@property
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> float:
"""simple docstring"""
return 0.0
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] , lowerCAmelCase : int ) -> nn.Module:
"""simple docstring"""
return ParameterProjection(
in_features=lowerCAmelCase , args_dim=self.args_dim , domain_map=LambdaLayer(self.domain_map ) , )
def SCREAMING_SNAKE_CASE ( self : int , *lowerCAmelCase : torch.Tensor ) -> Tuple:
"""simple docstring"""
raise NotImplementedError()
@staticmethod
def SCREAMING_SNAKE_CASE ( lowerCAmelCase : torch.Tensor ) -> torch.Tensor:
"""simple docstring"""
return (x + torch.sqrt(torch.square(lowerCAmelCase ) + 4.0 )) / 2.0
class SCREAMING_SNAKE_CASE ( a_ ):
"""simple docstring"""
lowerCamelCase : Dict[str, int] ={"df": 1, "loc": 1, "scale": 1}
lowerCamelCase : type =StudentT
@classmethod
def SCREAMING_SNAKE_CASE ( cls : int , lowerCAmelCase : torch.Tensor , lowerCAmelCase : torch.Tensor , lowerCAmelCase : torch.Tensor ) -> Any:
"""simple docstring"""
__lowerCAmelCase : Union[str, Any] = cls.squareplus(lowerCAmelCase ).clamp_min(torch.finfo(scale.dtype ).eps )
__lowerCAmelCase : Tuple = 2.0 + cls.squareplus(lowerCAmelCase )
return df.squeeze(-1 ), loc.squeeze(-1 ), scale.squeeze(-1 )
class NormalOutput(DistributionOutput):
    """Normal distribution output."""

    args_dim: Dict[str, int] = {"loc": 1, "scale": 1}
    distribution_class: type = Normal

    @classmethod
    def domain_map(cls, loc: torch.Tensor, scale: torch.Tensor):
        scale = cls.squareplus(scale).clamp_min(torch.finfo(scale.dtype).eps)
        return loc.squeeze(-1), scale.squeeze(-1)
class NegativeBinomialOutput(DistributionOutput):
    """Negative binomial distribution output."""

    args_dim: Dict[str, int] = {"total_count": 1, "logits": 1}
    distribution_class: type = NegativeBinomial

    @classmethod
    def domain_map(cls, total_count: torch.Tensor, logits: torch.Tensor):
        total_count = cls.squareplus(total_count)
        return total_count.squeeze(-1), logits.squeeze(-1)

    def _base_distribution(self, distr_args) -> Distribution:
        total_count, logits = distr_args
        if self.dim == 1:
            return self.distribution_class(total_count=total_count, logits=logits)
        else:
            return Independent(self.distribution_class(total_count=total_count, logits=logits), 1)

    # Cannot scale with an affine transform because the negative binomial returns integers;
    # instead, scale the parameters directly.
    def distribution(
        self, distr_args, loc: Optional[torch.Tensor] = None, scale: Optional[torch.Tensor] = None
    ) -> Distribution:
        total_count, logits = distr_args
        if scale is not None:
            # See scaling property of Gamma.
            logits += scale.log()
        return self._base_distribution((total_count, logits))
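# A minimal, self-contained sketch of the parameter-projection pattern above:
# a linear layer maps hidden features to raw distribution arguments, a domain
# map constrains them (via the same squareplus trick), and a torch StudentT
# distribution is built from the result. All names below are illustrative.
import torch
import torch.nn as nn
from torch.distributions import StudentT


def squareplus(x: torch.Tensor) -> torch.Tensor:
    # smooth map onto the positive half-line: (x + sqrt(x^2 + 4)) / 2
    return (x + torch.sqrt(torch.square(x) + 4.0)) / 2.0


hidden = torch.randn(8, 16)                       # batch of hidden features
proj = nn.Linear(16, 3)                           # raw df, loc, scale (one unit each)
raw_df, loc, raw_scale = proj(hidden).chunk(3, dim=-1)
df = 2.0 + squareplus(raw_df)                     # keep df > 2
scale = squareplus(raw_scale).clamp_min(1e-6)     # keep scale positive
dist = StudentT(df.squeeze(-1), loc.squeeze(-1), scale.squeeze(-1))
print(dist.sample().shape)                        # torch.Size([8]), one draw per element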
| 651
| 1
|
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {'vocab_file': 'sentencepiece.bpe.model'}

PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {
        'camembert-base': 'https://huggingface.co/camembert-base/resolve/main/sentencepiece.bpe.model',
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    'camembert-base': 512,
}

SPIECE_UNDERLINE = '▁'
class CamembertTokenizer(PreTrainedTokenizer):
    """CamemBERT tokenizer based on SentencePiece."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    def __init__(self, vocab_file, bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", additional_special_tokens=["<s>NOTUSED", "</s>NOTUSED"], sp_model_kwargs: Optional[Dict[str, Any]] = None, **kwargs) -> None:
        # Mask token behaves like a normal word, i.e. it includes the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token,
            cls_token=cls_token, pad_token=pad_token, mask_token=mask_token,
            additional_special_tokens=additional_special_tokens, sp_model_kwargs=self.sp_model_kwargs, **kwargs,
        )
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.vocab_file = vocab_file
        # HACK: These tokens were added by fairseq but don't seem to be actually used when duplicated in the actual
        # sentencepiece vocabulary (this is the case for <s> and </s>)
        self.fairseq_tokens_to_ids = {"<s>NOTUSED": 0, "<pad>": 1, "</s>NOTUSED": 2, "<unk>": 3}
        self.fairseq_offset = len(self.fairseq_tokens_to_ids)
        self.fairseq_tokens_to_ids["<mask>"] = len(self.sp_model) + len(self.fairseq_tokens_to_ids)
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
    @property
    def vocab_size(self) -> int:
        return len(self.fairseq_tokens_to_ids) + len(self.sp_model)

    def get_vocab(self) -> Dict[str, int]:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)
    def _convert_token_to_id(self, token: str) -> int:
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        elif self.sp_model.PieceToId(token) == 0:
            # Convert sentence piece unk token to fairseq unk token index
            return self.unk_token_id
        return self.fairseq_offset + self.sp_model.PieceToId(token)

    def _convert_id_to_token(self, index: int) -> str:
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset)
    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (out_vocab_file,)
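# A hedged usage sketch for the tokenizer defined above. It assumes the class
# is exposed as transformers.CamembertTokenizer and that the camembert-base
# checkpoint is reachable; adjust names if your setup differs.
from transformers import CamembertTokenizer

tokenizer = CamembertTokenizer.from_pretrained("camembert-base")
encoded = tokenizer("J'aime le camembert !")
print(tokenizer.convert_ids_to_tokens(encoded["input_ids"]))  # includes <s> ... </s>
print(tokenizer.decode(encoded["input_ids"], skip_special_tokens=True))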
| 421
|
import pytest
import requests
from datasets.utils.file_utils import http_head
from .utils import OfflineSimulationMode, RequestWouldHangIndefinitelyError, offline
@pytest.mark.integration
def test_offline_with_timeout():
    with offline(OfflineSimulationMode.CONNECTION_TIMES_OUT):
        with pytest.raises(RequestWouldHangIndefinitelyError):
            requests.request('GET', 'https://huggingface.co')
        with pytest.raises(requests.exceptions.ConnectTimeout):
            requests.request('GET', 'https://huggingface.co', timeout=1.0)
@pytest.mark.integration
def test_offline_with_connection_error():
    with offline(OfflineSimulationMode.CONNECTION_FAILS):
        with pytest.raises(requests.exceptions.ConnectionError):
            requests.request('GET', 'https://huggingface.co')
def test_offline_with_datasets_offline_mode_enabled():
    with offline(OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1):
        with pytest.raises(ConnectionError):
            http_head('https://huggingface.co')
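# A minimal, self-contained sketch of the offline-simulation idea exercised
# above: patching requests.Session.request so every call fails immediately,
# which is roughly what the CONNECTION_FAILS mode does. The context-manager
# name below is illustrative, not the actual datasets helper.
from contextlib import contextmanager
from unittest.mock import patch

import requests


@contextmanager
def fake_offline():
    def raise_connection_error(self, method, url, *args, **kwargs):
        raise requests.ConnectionError(f"Offline mode is enabled, blocked {method} request to {url}")

    with patch("requests.Session.request", raise_connection_error):
        yield


with fake_offline():
    try:
        requests.get("https://huggingface.co")
    except requests.ConnectionError as err:
        print("blocked as expected:", err)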
| 421
| 1
|
import copy
import json
import os
import tempfile
from transformers import is_torch_available
from .test_configuration_utils import config_common_kwargs
class ConfigTester:
def __init__( self : Dict , a_ : str , a_ : Optional[int]=None , a_ : str=True , a_ : Optional[Any]=None , **a_ : Optional[int] )-> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[int] = parent
SCREAMING_SNAKE_CASE__ : str = config_class
SCREAMING_SNAKE_CASE__ : Union[str, Any] = has_text_modality
SCREAMING_SNAKE_CASE__ : Optional[int] = kwargs
SCREAMING_SNAKE_CASE__ : str = common_properties
def __lowercase( self : List[str] )-> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : str = self.config_class(**self.inputs_dict )
SCREAMING_SNAKE_CASE__ : int = (
['hidden_size', 'num_attention_heads', 'num_hidden_layers']
if self.common_properties is None
else self.common_properties
)
# Add common fields for text models
if self.has_text_modality:
common_properties.extend(['vocab_size'] )
# Test that config has the common properties as getters
for prop in common_properties:
self.parent.assertTrue(hasattr(a_ , a_ ) , msg=F'''`{prop}` does not exist''' )
# Test that config has the common properties as setter
for idx, name in enumerate(a_ ):
try:
setattr(a_ , a_ , a_ )
self.parent.assertEqual(
getattr(a_ , a_ ) , a_ , msg=F'''`{name} value {idx} expected, but was {getattr(a_ , a_ )}''' )
except NotImplementedError:
# Some models might not be able to implement setters for common_properties
# In that case, a NotImplementedError is raised
pass
# Test if config class can be called with Config(prop_name=..)
for idx, name in enumerate(a_ ):
try:
SCREAMING_SNAKE_CASE__ : Tuple = self.config_class(**{name: idx} )
self.parent.assertEqual(
getattr(a_ , a_ ) , a_ , msg=F'''`{name} value {idx} expected, but was {getattr(a_ , a_ )}''' )
except NotImplementedError:
# Some models might not be able to implement setters for common_properties
# In that case, a NotImplementedError is raised
pass
def __lowercase( self : Tuple )-> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : str = self.config_class(**self.inputs_dict )
SCREAMING_SNAKE_CASE__ : Tuple = json.loads(config.to_json_string() )
for key, value in self.inputs_dict.items():
self.parent.assertEqual(obj[key] , a_ )
def __lowercase( self : int )-> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.config_class(**self.inputs_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
SCREAMING_SNAKE_CASE__ : Optional[Any] = os.path.join(a_ , 'config.json' )
config_first.to_json_file(a_ )
SCREAMING_SNAKE_CASE__ : int = self.config_class.from_json_file(a_ )
self.parent.assertEqual(config_second.to_dict() , config_first.to_dict() )
def __lowercase( self : int )-> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Dict = self.config_class(**self.inputs_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
config_first.save_pretrained(a_ )
SCREAMING_SNAKE_CASE__ : Tuple = self.config_class.from_pretrained(a_ )
self.parent.assertEqual(config_second.to_dict() , config_first.to_dict() )
def __lowercase( self : Any )-> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[Any] = self.config_class(**self.inputs_dict )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = 'test'
with tempfile.TemporaryDirectory() as tmpdirname:
SCREAMING_SNAKE_CASE__ : List[str] = os.path.join(a_ , a_ )
config_first.save_pretrained(a_ )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.config_class.from_pretrained(a_ , subfolder=a_ )
self.parent.assertEqual(config_second.to_dict() , config_first.to_dict() )
def __lowercase( self : Union[str, Any] )-> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Tuple = self.config_class(**self.inputs_dict , num_labels=5 )
self.parent.assertEqual(len(config.idalabel ) , 5 )
self.parent.assertEqual(len(config.labelaid ) , 5 )
SCREAMING_SNAKE_CASE__ : int = 3
self.parent.assertEqual(len(config.idalabel ) , 3 )
self.parent.assertEqual(len(config.labelaid ) , 3 )
def __lowercase( self : Any )-> Dict:
"""simple docstring"""
if self.config_class.is_composition:
return
SCREAMING_SNAKE_CASE__ : Any = self.config_class()
self.parent.assertIsNotNone(a_ )
def __lowercase( self : int )-> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Any = copy.deepcopy(a_ )
SCREAMING_SNAKE_CASE__ : Tuple = self.config_class(**a_ )
SCREAMING_SNAKE_CASE__ : Any = []
for key, value in config_common_kwargs.items():
if key == "torch_dtype":
if not is_torch_available():
continue
else:
import torch
                    if config.torch_dtype != torch.float16:
                        wrong_values.append(('torch_dtype', config.torch_dtype, torch.float16) )
elif getattr(a_ , a_ ) != value:
wrong_values.append((key, getattr(a_ , a_ ), value) )
if len(a_ ) > 0:
SCREAMING_SNAKE_CASE__ : str = '\n'.join([F'''- {v[0]}: got {v[1]} instead of {v[2]}''' for v in wrong_values] )
raise ValueError(F'''The following keys were not properly set in the config:\n{errors}''' )
def __lowercase( self : Tuple )-> Dict:
"""simple docstring"""
self.create_and_test_config_common_properties()
self.create_and_test_config_to_json_string()
self.create_and_test_config_to_json_file()
self.create_and_test_config_from_and_save_pretrained()
self.create_and_test_config_from_and_save_pretrained_subfolder()
self.create_and_test_config_with_num_labels()
self.check_config_can_be_init_without_params()
self.check_config_arguments_init()
| 85
|
import inspect
import unittest
from transformers import MobileViTConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileViTForImageClassification, MobileViTForSemanticSegmentation, MobileViTModel
from transformers.models.mobilevit.modeling_mobilevit import MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class MobileViTConfigTester(ConfigTester):
"""simple docstring"""
def __snake_case ( self : str ) -> str:
__snake_case : Tuple = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(lowerCamelCase , "hidden_sizes" ) )
self.parent.assertTrue(hasattr(lowerCamelCase , "neck_hidden_sizes" ) )
self.parent.assertTrue(hasattr(lowerCamelCase , "num_attention_heads" ) )
class MobileViTModelTester:
"""simple docstring"""
def __init__( self : Optional[int] , lowerCamelCase : List[str] , lowerCamelCase : Tuple=13 , lowerCamelCase : str=32 , lowerCamelCase : Dict=2 , lowerCamelCase : List[str]=3 , lowerCamelCase : Any=640 , lowerCamelCase : Optional[Any]=4 , lowerCamelCase : Tuple="silu" , lowerCamelCase : int=3 , lowerCamelCase : Dict=32 , lowerCamelCase : str=0.1 , lowerCamelCase : Optional[int]=0.1 , lowerCamelCase : Optional[Any]=0.1 , lowerCamelCase : Dict=0.02 , lowerCamelCase : Union[str, Any]=True , lowerCamelCase : Optional[int]=True , lowerCamelCase : Union[str, Any]=10 , lowerCamelCase : int=None , ) -> str:
__snake_case : Optional[Any] = parent
__snake_case : Optional[Any] = batch_size
__snake_case : Any = image_size
__snake_case : List[Any] = patch_size
__snake_case : Any = num_channels
__snake_case : Union[str, Any] = last_hidden_size
__snake_case : Any = num_attention_heads
__snake_case : Any = hidden_act
__snake_case : Tuple = conv_kernel_size
__snake_case : Any = output_stride
__snake_case : Any = hidden_dropout_prob
__snake_case : List[Any] = attention_probs_dropout_prob
__snake_case : Optional[Any] = classifier_dropout_prob
__snake_case : Union[str, Any] = use_labels
__snake_case : Optional[int] = is_training
__snake_case : Dict = num_labels
__snake_case : Any = initializer_range
__snake_case : Optional[int] = scope
def __snake_case ( self : str ) -> Union[str, Any]:
__snake_case : Union[str, Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__snake_case : List[Any] = None
__snake_case : Optional[int] = None
if self.use_labels:
__snake_case : Optional[int] = ids_tensor([self.batch_size] , self.num_labels )
__snake_case : Any = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
__snake_case : Optional[Any] = self.get_config()
return config, pixel_values, labels, pixel_labels
def __snake_case ( self : Any ) -> Union[str, Any]:
return MobileViTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , num_attention_heads=self.num_attention_heads , hidden_act=self.hidden_act , conv_kernel_size=self.conv_kernel_size , output_stride=self.output_stride , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , )
def __snake_case ( self : Any , lowerCamelCase : Tuple , lowerCamelCase : int , lowerCamelCase : Tuple , lowerCamelCase : Optional[int] ) -> Dict:
__snake_case : List[Any] = MobileViTModel(config=lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
__snake_case : List[str] = model(lowerCamelCase )
self.parent.assertEqual(
result.last_hidden_state.shape , (
self.batch_size,
self.last_hidden_size,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def __snake_case ( self : Optional[Any] , lowerCamelCase : List[str] , lowerCamelCase : Dict , lowerCamelCase : Optional[Any] , lowerCamelCase : Tuple ) -> List[str]:
__snake_case : str = self.num_labels
__snake_case : List[Any] = MobileViTForImageClassification(lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
__snake_case : List[Any] = model(lowerCamelCase , labels=lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __snake_case ( self : Dict , lowerCamelCase : Any , lowerCamelCase : Any , lowerCamelCase : Optional[Any] , lowerCamelCase : Dict ) -> Dict:
__snake_case : Union[str, Any] = self.num_labels
__snake_case : Optional[int] = MobileViTForSemanticSegmentation(lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
__snake_case : Tuple = model(lowerCamelCase )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
__snake_case : List[Any] = model(lowerCamelCase , labels=lowerCamelCase )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def __snake_case ( self : Optional[int] ) -> List[Any]:
__snake_case : Optional[Any] = self.prepare_config_and_inputs()
__snake_case , __snake_case , __snake_case , __snake_case : Union[str, Any] = config_and_inputs
__snake_case : Dict = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class MobileViTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
"""simple docstring"""
__UpperCAmelCase : str = (
(MobileViTModel, MobileViTForImageClassification, MobileViTForSemanticSegmentation)
if is_torch_available()
else ()
)
__UpperCAmelCase : Optional[Any] = (
{
"feature-extraction": MobileViTModel,
"image-classification": MobileViTForImageClassification,
"image-segmentation": MobileViTForSemanticSegmentation,
}
if is_torch_available()
else {}
)
__UpperCAmelCase : List[str] = False
__UpperCAmelCase : int = False
__UpperCAmelCase : Optional[int] = False
__UpperCAmelCase : Optional[int] = False
def __snake_case ( self : Optional[int] ) -> Dict:
__snake_case : Tuple = MobileViTModelTester(self )
__snake_case : Any = MobileViTConfigTester(self , config_class=lowerCamelCase , has_text_modality=lowerCamelCase )
def __snake_case ( self : Optional[int] ) -> Dict:
self.config_tester.run_common_tests()
@unittest.skip(reason="MobileViT does not use inputs_embeds" )
def __snake_case ( self : Dict ) -> Any:
pass
@unittest.skip(reason="MobileViT does not support input and output embeddings" )
def __snake_case ( self : Dict ) -> List[Any]:
pass
@unittest.skip(reason="MobileViT does not output attentions" )
def __snake_case ( self : int ) -> Dict:
pass
def __snake_case ( self : int ) -> Union[str, Any]:
__snake_case , __snake_case : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__snake_case : Optional[Any] = model_class(lowerCamelCase )
__snake_case : List[Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__snake_case : int = [*signature.parameters.keys()]
__snake_case : List[str] = ["pixel_values"]
self.assertListEqual(arg_names[:1] , lowerCamelCase )
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
def __snake_case ( self : int ) -> Tuple:
pass
def __snake_case ( self : Any ) -> Tuple:
__snake_case : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCamelCase )
def __snake_case ( self : Any ) -> str:
def check_hidden_states_output(lowerCamelCase : Union[str, Any] , lowerCamelCase : Dict , lowerCamelCase : Any ):
__snake_case : int = model_class(lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
with torch.no_grad():
__snake_case : int = model(**self._prepare_for_class(lowerCamelCase , lowerCamelCase ) )
__snake_case : Union[str, Any] = outputs.hidden_states
__snake_case : int = 5
self.assertEqual(len(lowerCamelCase ) , lowerCamelCase )
# MobileViT's feature maps are of shape (batch_size, num_channels, height, width)
# with the width and height being successively divided by 2.
__snake_case : List[Any] = 2
for i in range(len(lowerCamelCase ) ):
self.assertListEqual(
list(hidden_states[i].shape[-2:] ) , [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor] , )
divisor *= 2
self.assertEqual(self.model_tester.output_stride , divisor // 2 )
__snake_case , __snake_case : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__snake_case : str = True
check_hidden_states_output(lowerCamelCase , lowerCamelCase , lowerCamelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__snake_case : List[Any] = True
check_hidden_states_output(lowerCamelCase , lowerCamelCase , lowerCamelCase )
def __snake_case ( self : Any ) -> Any:
__snake_case : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowerCamelCase )
def __snake_case ( self : List[str] ) -> List[str]:
__snake_case : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*lowerCamelCase )
@slow
def __snake_case ( self : List[str] ) -> Any:
for model_name in MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__snake_case : List[str] = MobileViTModel.from_pretrained(lowerCamelCase )
self.assertIsNotNone(lowerCamelCase )
def prepare_img():
__snake_case : Optional[int] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
class a (unittest.TestCase ):
"""simple docstring"""
@cached_property
def __snake_case ( self : str ) -> Dict:
return MobileViTImageProcessor.from_pretrained("apple/mobilevit-xx-small" ) if is_vision_available() else None
@slow
def __snake_case ( self : Union[str, Any] ) -> List[str]:
__snake_case : Tuple = MobileViTForImageClassification.from_pretrained("apple/mobilevit-xx-small" ).to(lowerCamelCase )
__snake_case : Optional[Any] = self.default_image_processor
__snake_case : Union[str, Any] = prepare_img()
__snake_case : List[Any] = image_processor(images=lowerCamelCase , return_tensors="pt" ).to(lowerCamelCase )
# forward pass
with torch.no_grad():
__snake_case : Dict = model(**lowerCamelCase )
# verify the logits
__snake_case : Union[str, Any] = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , lowerCamelCase )
__snake_case : List[Any] = torch.tensor([-1.93_64, -1.23_27, -0.46_53] ).to(lowerCamelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowerCamelCase , atol=1E-4 ) )
@slow
def __snake_case ( self : str ) -> Optional[int]:
__snake_case : Optional[int] = MobileViTForSemanticSegmentation.from_pretrained("apple/deeplabv3-mobilevit-xx-small" )
__snake_case : str = model.to(lowerCamelCase )
__snake_case : int = MobileViTImageProcessor.from_pretrained("apple/deeplabv3-mobilevit-xx-small" )
__snake_case : Optional[int] = prepare_img()
__snake_case : List[Any] = image_processor(images=lowerCamelCase , return_tensors="pt" ).to(lowerCamelCase )
# forward pass
with torch.no_grad():
__snake_case : List[str] = model(**lowerCamelCase )
__snake_case : Union[str, Any] = outputs.logits
# verify the logits
__snake_case : Tuple = torch.Size((1, 21, 32, 32) )
self.assertEqual(logits.shape , lowerCamelCase )
__snake_case : Union[str, Any] = torch.tensor(
[
[[6.97_13, 6.97_86, 7.24_22], [7.28_93, 7.28_25, 7.44_46], [7.65_80, 7.87_97, 7.94_20]],
[[-10.68_69, -10.32_50, -10.34_71], [-10.42_28, -9.98_68, -9.71_32], [-11.04_05, -11.02_21, -10.73_18]],
[[-3.30_89, -2.85_39, -2.67_40], [-3.27_06, -2.56_21, -2.51_08], [-3.25_34, -2.66_15, -2.66_51]],
] , device=lowerCamelCase , )
self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , lowerCamelCase , atol=1E-4 ) )
@slow
def __snake_case ( self : Union[str, Any] ) -> Optional[int]:
__snake_case : Optional[Any] = MobileViTForSemanticSegmentation.from_pretrained("apple/deeplabv3-mobilevit-xx-small" )
__snake_case : Tuple = model.to(lowerCamelCase )
__snake_case : Dict = MobileViTImageProcessor.from_pretrained("apple/deeplabv3-mobilevit-xx-small" )
__snake_case : List[Any] = prepare_img()
__snake_case : List[Any] = image_processor(images=lowerCamelCase , return_tensors="pt" ).to(lowerCamelCase )
# forward pass
with torch.no_grad():
__snake_case : Any = model(**lowerCamelCase )
__snake_case : Dict = outputs.logits.detach().cpu()
__snake_case : Any = image_processor.post_process_semantic_segmentation(outputs=lowerCamelCase , target_sizes=[(50, 60)] )
__snake_case : int = torch.Size((50, 60) )
self.assertEqual(segmentation[0].shape , lowerCamelCase )
__snake_case : List[str] = image_processor.post_process_semantic_segmentation(outputs=lowerCamelCase )
__snake_case : Optional[int] = torch.Size((32, 32) )
self.assertEqual(segmentation[0].shape , lowerCamelCase )
| 81
| 0
|
'''simple docstring'''
import unittest
from transformers import DebertaVaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DebertaVaForMaskedLM,
DebertaVaForMultipleChoice,
DebertaVaForQuestionAnswering,
DebertaVaForSequenceClassification,
DebertaVaForTokenClassification,
DebertaVaModel,
)
from transformers.models.deberta_va.modeling_deberta_va import DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST
class DebertaVaModelTester:
"""simple docstring"""
def __init__( self , __A , __A=13 , __A=7 , __A=True , __A=True , __A=True , __A=True , __A=99 , __A=32 , __A=5 , __A=4 , __A=37 , __A="gelu" , __A=0.1 , __A=0.1 , __A=512 , __A=16 , __A=2 , __A=0.02 , __A=False , __A=True , __A="None" , __A=3 , __A=4 , __A=None , ) -> List[str]:
_lowerCAmelCase =parent
_lowerCAmelCase =batch_size
_lowerCAmelCase =seq_length
_lowerCAmelCase =is_training
_lowerCAmelCase =use_input_mask
_lowerCAmelCase =use_token_type_ids
_lowerCAmelCase =use_labels
_lowerCAmelCase =vocab_size
_lowerCAmelCase =hidden_size
_lowerCAmelCase =num_hidden_layers
_lowerCAmelCase =num_attention_heads
_lowerCAmelCase =intermediate_size
_lowerCAmelCase =hidden_act
_lowerCAmelCase =hidden_dropout_prob
_lowerCAmelCase =attention_probs_dropout_prob
_lowerCAmelCase =max_position_embeddings
_lowerCAmelCase =type_vocab_size
_lowerCAmelCase =type_sequence_label_size
_lowerCAmelCase =initializer_range
_lowerCAmelCase =num_labels
_lowerCAmelCase =num_choices
_lowerCAmelCase =relative_attention
_lowerCAmelCase =position_biased_input
_lowerCAmelCase =pos_att_type
_lowerCAmelCase =scope
def UpperCamelCase__ ( self ) -> List[Any]:
_lowerCAmelCase =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_lowerCAmelCase =None
if self.use_input_mask:
_lowerCAmelCase =ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
_lowerCAmelCase =None
if self.use_token_type_ids:
_lowerCAmelCase =ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
_lowerCAmelCase =None
_lowerCAmelCase =None
_lowerCAmelCase =None
if self.use_labels:
_lowerCAmelCase =ids_tensor([self.batch_size] , self.type_sequence_label_size )
_lowerCAmelCase =ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_lowerCAmelCase =ids_tensor([self.batch_size] , self.num_choices )
_lowerCAmelCase =self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def UpperCamelCase__ ( self ) -> Optional[int]:
return DebertaVaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , relative_attention=self.relative_attention , position_biased_input=self.position_biased_input , pos_att_type=self.pos_att_type , )
def UpperCamelCase__ ( self , __A ) -> str:
self.parent.assertListEqual(list(result.loss.size() ) , [] )
def UpperCamelCase__ ( self , __A , __A , __A , __A , __A , __A , __A ) -> List[str]:
_lowerCAmelCase =DebertaVaModel(config=__A )
model.to(__A )
model.eval()
_lowerCAmelCase =model(__A , attention_mask=__A , token_type_ids=__A )[0]
_lowerCAmelCase =model(__A , token_type_ids=__A )[0]
_lowerCAmelCase =model(__A )[0]
self.parent.assertListEqual(list(sequence_output.size() ) , [self.batch_size, self.seq_length, self.hidden_size] )
def UpperCamelCase__ ( self , __A , __A , __A , __A , __A , __A , __A ) -> str:
_lowerCAmelCase =DebertaVaForMaskedLM(config=__A )
model.to(__A )
model.eval()
_lowerCAmelCase =model(__A , attention_mask=__A , token_type_ids=__A , labels=__A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCamelCase__ ( self , __A , __A , __A , __A , __A , __A , __A ) -> Optional[Any]:
_lowerCAmelCase =self.num_labels
_lowerCAmelCase =DebertaVaForSequenceClassification(__A )
model.to(__A )
model.eval()
_lowerCAmelCase =model(__A , attention_mask=__A , token_type_ids=__A , labels=__A )
self.parent.assertListEqual(list(result.logits.size() ) , [self.batch_size, self.num_labels] )
self.check_loss_output(__A )
def UpperCamelCase__ ( self , __A , __A , __A , __A , __A , __A , __A ) -> List[str]:
_lowerCAmelCase =self.num_labels
_lowerCAmelCase =DebertaVaForTokenClassification(config=__A )
model.to(__A )
model.eval()
_lowerCAmelCase =model(__A , attention_mask=__A , token_type_ids=__A , labels=__A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def UpperCamelCase__ ( self , __A , __A , __A , __A , __A , __A , __A ) -> List[Any]:
_lowerCAmelCase =DebertaVaForQuestionAnswering(config=__A )
model.to(__A )
model.eval()
_lowerCAmelCase =model(
__A , attention_mask=__A , token_type_ids=__A , start_positions=__A , end_positions=__A , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def UpperCamelCase__ ( self , __A , __A , __A , __A , __A , __A , __A ) -> Tuple:
_lowerCAmelCase =DebertaVaForMultipleChoice(config=__A )
model.to(__A )
model.eval()
_lowerCAmelCase =input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_lowerCAmelCase =token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_lowerCAmelCase =input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_lowerCAmelCase =model(
__A , attention_mask=__A , token_type_ids=__A , labels=__A , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def UpperCamelCase__ ( self ) -> Optional[Any]:
_lowerCAmelCase =self.prepare_config_and_inputs()
(
(
_lowerCAmelCase
) , (
_lowerCAmelCase
) , (
_lowerCAmelCase
) , (
_lowerCAmelCase
) , (
_lowerCAmelCase
) , (
_lowerCAmelCase
) , (
_lowerCAmelCase
) ,
) =config_and_inputs
_lowerCAmelCase ={'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class DebertaVaModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
"""simple docstring"""
lowercase : List[Any] = (
(
DebertaVaModel,
DebertaVaForMaskedLM,
DebertaVaForSequenceClassification,
DebertaVaForTokenClassification,
DebertaVaForQuestionAnswering,
DebertaVaForMultipleChoice,
)
if is_torch_available()
else ()
)
lowercase : Union[str, Any] = (
{
'''feature-extraction''': DebertaVaModel,
'''fill-mask''': DebertaVaForMaskedLM,
'''question-answering''': DebertaVaForQuestionAnswering,
'''text-classification''': DebertaVaForSequenceClassification,
'''token-classification''': DebertaVaForTokenClassification,
'''zero-shot''': DebertaVaForSequenceClassification,
}
if is_torch_available()
else {}
)
lowercase : Dict = True
lowercase : List[Any] = False
lowercase : Optional[int] = False
lowercase : Dict = False
lowercase : Optional[Any] = False
def UpperCamelCase__ ( self ) -> Optional[Any]:
_lowerCAmelCase =DebertaVaModelTester(self )
_lowerCAmelCase =ConfigTester(self , config_class=__A , hidden_size=37 )
def UpperCamelCase__ ( self ) -> Union[str, Any]:
self.config_tester.run_common_tests()
def UpperCamelCase__ ( self ) -> Union[str, Any]:
_lowerCAmelCase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_model(*__A )
def UpperCamelCase__ ( self ) -> int:
_lowerCAmelCase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_sequence_classification(*__A )
def UpperCamelCase__ ( self ) -> Dict:
_lowerCAmelCase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_masked_lm(*__A )
def UpperCamelCase__ ( self ) -> int:
_lowerCAmelCase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_question_answering(*__A )
def UpperCamelCase__ ( self ) -> List[Any]:
_lowerCAmelCase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_token_classification(*__A )
def UpperCamelCase__ ( self ) -> Tuple:
_lowerCAmelCase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_multiple_choice(*__A )
@slow
def UpperCamelCase__ ( self ) -> Dict:
for model_name in DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowerCAmelCase =DebertaVaModel.from_pretrained(__A )
self.assertIsNotNone(__A )
@require_torch
@require_sentencepiece
@require_tokenizers
class SCREAMING_SNAKE_CASE ( unittest.TestCase):
"""simple docstring"""
@unittest.skip(reason='Model not available yet' )
def UpperCamelCase__ ( self ) -> int:
pass
@slow
def UpperCamelCase__ ( self ) -> Union[str, Any]:
_lowerCAmelCase =DebertaVaModel.from_pretrained('microsoft/deberta-v2-xlarge' )
_lowerCAmelCase =torch.tensor([[0, 3_1414, 232, 328, 740, 1140, 1_2695, 69, 4_6078, 1588, 2]] )
_lowerCAmelCase =torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
with torch.no_grad():
_lowerCAmelCase =model(__A , attention_mask=__A )[0]
# compare the actual values for a slice.
_lowerCAmelCase =torch.tensor(
[[[0.2_356, 0.1_948, 0.0_369], [-0.1_063, 0.3_586, -0.5_152], [-0.6_399, -0.0_259, -0.2_525]]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , __A , atol=1E-4 ) , F'''{output[:, 1:4, 1:4]}''' )
| 719
|
'''simple docstring'''
import argparse
import os
import sys
from unittest.mock import patch
import pytorch_lightning as pl
import timeout_decorator
import torch
from distillation import SummarizationDistiller, distill_main
from finetune import SummarizationModule, main
from transformers import MarianMTModel
from transformers.file_utils import cached_path
from transformers.testing_utils import TestCasePlus, require_torch_gpu, slow
from utils import load_json
MARIAN_MODEL = '''sshleifer/mar_enro_6_3_student'''
class TestMbartCc25Enro(TestCasePlus):
"""simple docstring"""
def UpperCamelCase__ ( self ) -> Optional[Any]:
super().setUp()
_lowerCAmelCase =cached_path(
'https://cdn-datasets.huggingface.co/translation/wmt_en_ro-tr40k-va0.5k-te0.5k.tar.gz' , extract_compressed_file=__A , )
_lowerCAmelCase =F'''{data_cached}/wmt_en_ro-tr40k-va0.5k-te0.5k'''
@slow
@require_torch_gpu
def UpperCamelCase__ ( self ) -> Union[str, Any]:
MarianMTModel.from_pretrained(__A )
@slow
@require_torch_gpu
def UpperCamelCase__ ( self ) -> Union[str, Any]:
_lowerCAmelCase ={
'$MAX_LEN': 64,
'$BS': 64,
'$GAS': 1,
'$ENRO_DIR': self.data_dir,
'facebook/mbart-large-cc25': MARIAN_MODEL,
# "val_check_interval=0.25": "val_check_interval=1.0",
'--learning_rate=3e-5': '--learning_rate 3e-4',
'--num_train_epochs 6': '--num_train_epochs 1',
}
# Clean up bash script
_lowerCAmelCase =(self.test_file_dir / 'train_mbart_cc25_enro.sh').open().read().split('finetune.py' )[1].strip()
_lowerCAmelCase =bash_script.replace('\\\n' , '' ).strip().replace('"$@"' , '' )
for k, v in env_vars_to_replace.items():
_lowerCAmelCase =bash_script.replace(__A , str(__A ) )
_lowerCAmelCase =self.get_auto_remove_tmp_dir()
# bash_script = bash_script.replace("--fp16 ", "")
_lowerCAmelCase =F'''
--output_dir {output_dir}
--tokenizer_name Helsinki-NLP/opus-mt-en-ro
--sortish_sampler
--do_predict
--gpus 1
--freeze_encoder
--n_train 40000
--n_val 500
--n_test 500
--fp16_opt_level O1
--num_sanity_val_steps 0
--eval_beams 2
'''.split()
# XXX: args.gpus > 1 : handle multi_gpu in the future
_lowerCAmelCase =['finetune.py'] + bash_script.split() + args
with patch.object(__A , 'argv' , __A ):
_lowerCAmelCase =argparse.ArgumentParser()
_lowerCAmelCase =pl.Trainer.add_argparse_args(__A )
_lowerCAmelCase =SummarizationModule.add_model_specific_args(__A , os.getcwd() )
_lowerCAmelCase =parser.parse_args()
_lowerCAmelCase =main(__A )
# Check metrics
_lowerCAmelCase =load_json(model.metrics_save_path )
_lowerCAmelCase =metrics['val'][0]
_lowerCAmelCase =metrics['val'][-1]
self.assertEqual(len(metrics['val'] ) , (args.max_epochs / args.val_check_interval) )
assert isinstance(last_step_stats[F'''val_avg_{model.val_metric}'''] , __A )
self.assertGreater(last_step_stats['val_avg_gen_time'] , 0.01 )
# model hanging on generate. Maybe bad config was saved. (XXX: old comment/assert?)
self.assertLessEqual(last_step_stats['val_avg_gen_time'] , 1.0 )
# test learning requirements:
# 1. BLEU improves over the course of training by more than 2 pts
self.assertGreater(last_step_stats['val_avg_bleu'] - first_step_stats['val_avg_bleu'] , 2 )
# 2. BLEU finishes above 17
self.assertGreater(last_step_stats['val_avg_bleu'] , 17 )
# 3. test BLEU and val BLEU within ~1.1 pt.
self.assertLess(abs(metrics['val'][-1]['val_avg_bleu'] - metrics['test'][-1]['test_avg_bleu'] ) , 1.1 )
# check lightning ckpt can be loaded and has a reasonable statedict
_lowerCAmelCase =os.listdir(__A )
_lowerCAmelCase =[x for x in contents if x.endswith('.ckpt' )][0]
_lowerCAmelCase =os.path.join(args.output_dir , __A )
_lowerCAmelCase =torch.load(__A , map_location='cpu' )
_lowerCAmelCase ='model.model.decoder.layers.0.encoder_attn_layer_norm.weight'
assert expected_key in ckpt["state_dict"]
        assert ckpt["state_dict"]["model.model.decoder.layers.0.encoder_attn_layer_norm.weight"].dtype == torch.float32
# TODO: turn on args.do_predict when PL bug fixed.
if args.do_predict:
_lowerCAmelCase ={os.path.basename(__A ) for p in contents}
assert "test_generations.txt" in contents
assert "test_results.txt" in contents
# assert len(metrics["val"]) == desired_n_evals
assert len(metrics['test'] ) == 1
class TestDistilMarianNoTeacher(TestCasePlus):
"""simple docstring"""
@timeout_decorator.timeout(600 )
@slow
@require_torch_gpu
def UpperCamelCase__ ( self ) -> Tuple:
_lowerCAmelCase =F'''{self.test_file_dir_str}/test_data/wmt_en_ro'''
_lowerCAmelCase ={
'--fp16_opt_level=O1': '',
'$MAX_LEN': 128,
'$BS': 16,
'$GAS': 1,
'$ENRO_DIR': data_dir,
'$m': 'sshleifer/student_marian_en_ro_6_1',
'val_check_interval=0.25': 'val_check_interval=1.0',
}
# Clean up bash script
_lowerCAmelCase =(
(self.test_file_dir / 'distil_marian_no_teacher.sh').open().read().split('distillation.py' )[1].strip()
)
_lowerCAmelCase =bash_script.replace('\\\n' , '' ).strip().replace('"$@"' , '' )
_lowerCAmelCase =bash_script.replace('--fp16 ' , ' ' )
for k, v in env_vars_to_replace.items():
_lowerCAmelCase =bash_script.replace(__A , str(__A ) )
_lowerCAmelCase =self.get_auto_remove_tmp_dir()
_lowerCAmelCase =bash_script.replace('--fp16' , '' )
_lowerCAmelCase =6
_lowerCAmelCase =(
['distillation.py']
+ bash_script.split()
+ [
F'''--output_dir={output_dir}''',
'--gpus=1',
'--learning_rate=1e-3',
F'''--num_train_epochs={epochs}''',
'--warmup_steps=10',
'--val_check_interval=1.0',
'--do_predict',
]
)
with patch.object(__A , 'argv' , __A ):
_lowerCAmelCase =argparse.ArgumentParser()
_lowerCAmelCase =pl.Trainer.add_argparse_args(__A )
_lowerCAmelCase =SummarizationDistiller.add_model_specific_args(__A , os.getcwd() )
_lowerCAmelCase =parser.parse_args()
# assert args.gpus == gpus THIS BREAKS for multi_gpu
_lowerCAmelCase =distill_main(__A )
# Check metrics
_lowerCAmelCase =load_json(model.metrics_save_path )
_lowerCAmelCase =metrics['val'][0]
_lowerCAmelCase =metrics['val'][-1]
assert len(metrics['val'] ) >= (args.max_epochs / args.val_check_interval) # +1 accounts for val_sanity_check
assert last_step_stats["val_avg_gen_time"] >= 0.01
assert first_step_stats["val_avg_bleu"] < last_step_stats["val_avg_bleu"] # model learned nothing
assert 1.0 >= last_step_stats["val_avg_gen_time"] # model hanging on generate. Maybe bad config was saved.
assert isinstance(last_step_stats[F'''val_avg_{model.val_metric}'''] , __A )
# check lightning ckpt can be loaded and has a reasonable statedict
_lowerCAmelCase =os.listdir(__A )
_lowerCAmelCase =[x for x in contents if x.endswith('.ckpt' )][0]
_lowerCAmelCase =os.path.join(args.output_dir , __A )
_lowerCAmelCase =torch.load(__A , map_location='cpu' )
_lowerCAmelCase ='model.model.decoder.layers.0.encoder_attn_layer_norm.weight'
assert expected_key in ckpt["state_dict"]
        assert ckpt["state_dict"]["model.model.decoder.layers.0.encoder_attn_layer_norm.weight"].dtype == torch.float32
# TODO: turn on args.do_predict when PL bug fixed.
if args.do_predict:
_lowerCAmelCase ={os.path.basename(__A ) for p in contents}
assert "test_generations.txt" in contents
assert "test_results.txt" in contents
# assert len(metrics["val"]) == desired_n_evals
assert len(metrics['test'] ) == 1
| 58
| 0
|
"""simple docstring"""
def fibonacci(n: int) -> int:
    """Return the n-th Fibonacci number, with fibonacci(1) == 0 and fibonacci(2) == 1."""
    if n == 1 or not isinstance(n, int):
        return 0
    elif n == 2:
        return 1
    else:
        sequence = [0, 1]
        for i in range(2, n + 1):
            sequence.append(sequence[i - 1] + sequence[i - 2])
        return sequence[n]
def fibonacci_digits_index(n: int) -> int:
    """Return the index of the first Fibonacci number that has n digits."""
    digits = 0
    index = 2
    while digits < n:
        index += 1
        digits = len(str(fibonacci(index)))
    return index
def solution(n: int = 1_000) -> int:
    """Return the index of the first Fibonacci number to contain n digits."""
    return fibonacci_digits_index(n)
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
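# A quick, standalone sanity check for the functions above: under this
# indexing the first Fibonacci number with 3 digits is F(12) = 144.
assert fibonacci(12) == 144
assert fibonacci_digits_index(3) == 12
assert solution(3) == 12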
| 58
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
'configuration_encodec': [
'ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP',
'EncodecConfig',
],
'feature_extraction_encodec': ['EncodecFeatureExtractor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_encodec"] = [
'ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST',
'EncodecModel',
'EncodecPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_encodec import (
ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP,
EncodecConfig,
)
from .feature_extraction_encodec import EncodecFeatureExtractor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_encodec import (
ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST,
EncodecModel,
EncodecPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
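# A hedged illustration of what the lazy-module pattern above buys you
# (assuming transformers is installed): importing the package is cheap, and
# the heavy torch-backed classes are only materialized on first attribute
# access.
import importlib
import time

start = time.perf_counter()
encodec = importlib.import_module("transformers.models.encodec")
print(f"package import: {time.perf_counter() - start:.3f}s")

start = time.perf_counter()
config_cls = encodec.EncodecConfig  # triggers the real submodule import
print(f"first attribute access: {time.perf_counter() - start:.3f}s ({config_cls.__name__})")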
| 640
| 0
|
"""simple docstring"""
import importlib
import json
import os
from collections import OrderedDict
from typing import Dict, Optional, Union
# Build the list of all feature extractors
from ...configuration_utils import PretrainedConfig
from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code
from ...feature_extraction_utils import FeatureExtractionMixin
from ...utils import CONFIG_NAME, FEATURE_EXTRACTOR_NAME, get_file_from_repo, logging
from .auto_factory import _LazyAutoMapping
from .configuration_auto import (
CONFIG_MAPPING_NAMES,
AutoConfig,
model_type_to_module_name,
replace_list_option_in_docstrings,
)
logger = logging.get_logger(__name__)
FEATURE_EXTRACTOR_MAPPING_NAMES = OrderedDict(
[
('''audio-spectrogram-transformer''', '''ASTFeatureExtractor'''),
('''beit''', '''BeitFeatureExtractor'''),
('''chinese_clip''', '''ChineseCLIPFeatureExtractor'''),
('''clap''', '''ClapFeatureExtractor'''),
('''clip''', '''CLIPFeatureExtractor'''),
('''clipseg''', '''ViTFeatureExtractor'''),
('''conditional_detr''', '''ConditionalDetrFeatureExtractor'''),
('''convnext''', '''ConvNextFeatureExtractor'''),
('''cvt''', '''ConvNextFeatureExtractor'''),
('''data2vec-audio''', '''Wav2Vec2FeatureExtractor'''),
('''data2vec-vision''', '''BeitFeatureExtractor'''),
('''deformable_detr''', '''DeformableDetrFeatureExtractor'''),
('''deit''', '''DeiTFeatureExtractor'''),
('''detr''', '''DetrFeatureExtractor'''),
('''dinat''', '''ViTFeatureExtractor'''),
('''donut-swin''', '''DonutFeatureExtractor'''),
('''dpt''', '''DPTFeatureExtractor'''),
('''encodec''', '''EncodecFeatureExtractor'''),
('''flava''', '''FlavaFeatureExtractor'''),
('''glpn''', '''GLPNFeatureExtractor'''),
('''groupvit''', '''CLIPFeatureExtractor'''),
('''hubert''', '''Wav2Vec2FeatureExtractor'''),
('''imagegpt''', '''ImageGPTFeatureExtractor'''),
('''layoutlmv2''', '''LayoutLMv2FeatureExtractor'''),
('''layoutlmv3''', '''LayoutLMv3FeatureExtractor'''),
('''levit''', '''LevitFeatureExtractor'''),
('''maskformer''', '''MaskFormerFeatureExtractor'''),
('''mctct''', '''MCTCTFeatureExtractor'''),
('''mobilenet_v1''', '''MobileNetV1FeatureExtractor'''),
('''mobilenet_v2''', '''MobileNetV2FeatureExtractor'''),
('''mobilevit''', '''MobileViTFeatureExtractor'''),
('''nat''', '''ViTFeatureExtractor'''),
('''owlvit''', '''OwlViTFeatureExtractor'''),
('''perceiver''', '''PerceiverFeatureExtractor'''),
('''poolformer''', '''PoolFormerFeatureExtractor'''),
('''regnet''', '''ConvNextFeatureExtractor'''),
('''resnet''', '''ConvNextFeatureExtractor'''),
('''segformer''', '''SegformerFeatureExtractor'''),
('''sew''', '''Wav2Vec2FeatureExtractor'''),
('''sew-d''', '''Wav2Vec2FeatureExtractor'''),
('''speech_to_text''', '''Speech2TextFeatureExtractor'''),
('''speecht5''', '''SpeechT5FeatureExtractor'''),
('''swiftformer''', '''ViTFeatureExtractor'''),
('''swin''', '''ViTFeatureExtractor'''),
('''swinv2''', '''ViTFeatureExtractor'''),
('''table-transformer''', '''DetrFeatureExtractor'''),
('''timesformer''', '''VideoMAEFeatureExtractor'''),
('''tvlt''', '''TvltFeatureExtractor'''),
('''unispeech''', '''Wav2Vec2FeatureExtractor'''),
('''unispeech-sat''', '''Wav2Vec2FeatureExtractor'''),
('''van''', '''ConvNextFeatureExtractor'''),
('''videomae''', '''VideoMAEFeatureExtractor'''),
('''vilt''', '''ViltFeatureExtractor'''),
('''vit''', '''ViTFeatureExtractor'''),
('''vit_mae''', '''ViTFeatureExtractor'''),
('''vit_msn''', '''ViTFeatureExtractor'''),
('''wav2vec2''', '''Wav2Vec2FeatureExtractor'''),
('''wav2vec2-conformer''', '''Wav2Vec2FeatureExtractor'''),
('''wavlm''', '''Wav2Vec2FeatureExtractor'''),
('''whisper''', '''WhisperFeatureExtractor'''),
('''xclip''', '''CLIPFeatureExtractor'''),
('''yolos''', '''YolosFeatureExtractor'''),
]
)
FEATURE_EXTRACTOR_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FEATURE_EXTRACTOR_MAPPING_NAMES)
def feature_extractor_class_from_name(class_name: str):
    for module_name, extractors in FEATURE_EXTRACTOR_MAPPING_NAMES.items():
        if class_name in extractors:
            module_name = model_type_to_module_name(module_name)
            module = importlib.import_module(f".{module_name}", "transformers.models")
            try:
                return getattr(module, class_name)
            except AttributeError:
                continue

    for _, extractor in FEATURE_EXTRACTOR_MAPPING._extra_content.items():
        if getattr(extractor, "__name__", None) == class_name:
            return extractor

    # We did not find the class, but maybe it's because a dep is missing. In that case, the class will be in the main
    # init and we return the proper dummy to get an appropriate error message.
    main_module = importlib.import_module("transformers")
    if hasattr(main_module, class_name):
        return getattr(main_module, class_name)
    return None
def a__ ( SCREAMING_SNAKE_CASE : Union[str, os.PathLike] , SCREAMING_SNAKE_CASE : Optional[Union[str, os.PathLike]] = None , SCREAMING_SNAKE_CASE : bool = False , SCREAMING_SNAKE_CASE : bool = False , SCREAMING_SNAKE_CASE : Optional[Dict[str, str]] = None , SCREAMING_SNAKE_CASE : Optional[Union[bool, str]] = None , SCREAMING_SNAKE_CASE : Optional[str] = None , SCREAMING_SNAKE_CASE : bool = False , **SCREAMING_SNAKE_CASE : str , ):
'''simple docstring'''
lowerCAmelCase : Union[str, Any] = get_file_from_repo(
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , cache_dir=SCREAMING_SNAKE_CASE , force_download=SCREAMING_SNAKE_CASE , resume_download=SCREAMING_SNAKE_CASE , proxies=SCREAMING_SNAKE_CASE , use_auth_token=SCREAMING_SNAKE_CASE , revision=SCREAMING_SNAKE_CASE , local_files_only=SCREAMING_SNAKE_CASE , )
if resolved_config_file is None:
logger.info(
"Could not locate the feature extractor configuration file, will try to use the model config instead." )
return {}
with open(SCREAMING_SNAKE_CASE , encoding="utf-8" ) as reader:
return json.load(SCREAMING_SNAKE_CASE )
class AutoFeatureExtractor:
"""simple docstring"""
def __init__( self ):
"""simple docstring"""
raise EnvironmentError(
"AutoFeatureExtractor is designed to be instantiated "
"using the `AutoFeatureExtractor.from_pretrained(pretrained_model_name_or_path)` method." )
@classmethod
@replace_list_option_in_docstrings(snake_case__ )
def lowercase__ ( cls , snake_case__ , **snake_case__ ):
"""simple docstring"""
lowerCAmelCase : List[str] = kwargs.pop("config" , snake_case__ )
lowerCAmelCase : Optional[Any] = kwargs.pop("trust_remote_code" , snake_case__ )
lowerCAmelCase : Dict = True
lowerCAmelCase , lowerCAmelCase : Optional[Any] = FeatureExtractionMixin.get_feature_extractor_dict(snake_case__ , **snake_case__ )
lowerCAmelCase : Any = config_dict.get("feature_extractor_type" , snake_case__ )
lowerCAmelCase : Tuple = None
if "AutoFeatureExtractor" in config_dict.get("auto_map" , {} ):
lowerCAmelCase : Tuple = config_dict["auto_map"]["AutoFeatureExtractor"]
# If we don't find the feature extractor class in the feature extractor config, let's try the model config.
if feature_extractor_class is None and feature_extractor_auto_map is None:
if not isinstance(snake_case__ , snake_case__ ):
lowerCAmelCase : int = AutoConfig.from_pretrained(snake_case__ , **snake_case__ )
# It could be in `config.feature_extractor_type``
lowerCAmelCase : Optional[Any] = getattr(snake_case__ , "feature_extractor_type" , snake_case__ )
if hasattr(snake_case__ , "auto_map" ) and "AutoFeatureExtractor" in config.auto_map:
lowerCAmelCase : Optional[Any] = config.auto_map["AutoFeatureExtractor"]
if feature_extractor_class is not None:
lowerCAmelCase : Dict = feature_extractor_class_from_name(snake_case__ )
lowerCAmelCase : Tuple = feature_extractor_auto_map is not None
lowerCAmelCase : Optional[int] = feature_extractor_class is not None or type(snake_case__ ) in FEATURE_EXTRACTOR_MAPPING
lowerCAmelCase : Dict = resolve_trust_remote_code(
snake_case__ , snake_case__ , snake_case__ , snake_case__ )
if has_remote_code and trust_remote_code:
lowerCAmelCase : Any = get_class_from_dynamic_module(
snake_case__ , snake_case__ , **snake_case__ )
lowerCAmelCase : str = kwargs.pop("code_revision" , snake_case__ )
if os.path.isdir(snake_case__ ):
feature_extractor_class.register_for_auto_class()
return feature_extractor_class.from_dict(snake_case__ , **snake_case__ )
elif feature_extractor_class is not None:
return feature_extractor_class.from_dict(snake_case__ , **snake_case__ )
# Last try: we use the FEATURE_EXTRACTOR_MAPPING.
elif type(snake_case__ ) in FEATURE_EXTRACTOR_MAPPING:
lowerCAmelCase : Union[str, Any] = FEATURE_EXTRACTOR_MAPPING[type(snake_case__ )]
return feature_extractor_class.from_dict(snake_case__ , **snake_case__ )
raise ValueError(
f"""Unrecognized feature extractor in {pretrained_model_name_or_path}. Should have a """
f"""`feature_extractor_type` key in its {FEATURE_EXTRACTOR_NAME} of {CONFIG_NAME}, or one of the following """
f"""`model_type` keys in its {CONFIG_NAME}: {", ".join(c for c in FEATURE_EXTRACTOR_MAPPING_NAMES.keys() )}""" )
    @staticmethod
    def register(config_class, feature_extractor_class):
        """Register a new feature extractor class for a given config class."""
        FEATURE_EXTRACTOR_MAPPING.register(config_class, feature_extractor_class)
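# A hedged usage sketch for the auto feature-extractor machinery above. It
# assumes the standard transformers entry point and that the example
# checkpoint is reachable (the first call downloads its config).
from transformers import AutoFeatureExtractor

feature_extractor = AutoFeatureExtractor.from_pretrained("facebook/wav2vec2-base-960h")
# The wav2vec2 entry in the mapping above resolves to Wav2Vec2FeatureExtractor.
print(type(feature_extractor).__name__)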
| 681
|
"""simple docstring"""
import random
from .binary_exp_mod import bin_exp_mod
def is_prime_big(n, prec=1_000):
    """Probabilistic Miller-Rabin primality test using `prec` random witnesses."""
    if n < 2:
        return False
    if n % 2 == 0:
        return n == 2
    # this means n is odd
    d = n - 1
    exp = 0
    while d % 2 == 0:
        d /= 2
        exp += 1
    # n - 1 = d * (2**exp)
    count = 0
    while count < prec:
        a = random.randint(2, n - 1)
        b = bin_exp_mod(a, d, n)
        if b != 1:
            flag = True
            for _ in range(exp):
                if b == n - 1:
                    flag = False
                    break
                b = b * b
                b %= n
            if flag:
                return False
        count += 1
    return True
if __name__ == "__main__":
    n = abs(int(input('Enter bound : ').strip()))
    print("Here's the list of primes:")
    print(', '.join(str(i) for i in range(n + 1) if is_prime_big(i)))
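# A self-contained sanity check for the Miller-Rabin routine above. It swaps
# the local bin_exp_mod helper for Python's builtin three-argument pow (the
# same modular exponentiation), so it runs without the sibling module.
import random


def is_prime_big_standalone(n, prec=30):
    if n < 2:
        return False
    if n % 2 == 0:
        return n == 2
    d, exp = n - 1, 0
    while d % 2 == 0:
        d //= 2
        exp += 1
    for _ in range(prec):
        b = pow(random.randint(2, n - 1), d, n)
        if b != 1:
            flag = True
            for _ in range(exp):
                if b == n - 1:
                    flag = False
                    break
                b = b * b % n
            if flag:
                return False
    return True


print([i for i in range(2, 50) if is_prime_big_standalone(i)])
# expected: [2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47]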
| 681
| 1
|
import inspect
from typing import List, Optional, Tuple, Union
import numpy as np
import PIL
import torch
import torch.utils.checkpoint
from ...models import UNetaDModel, VQModel
from ...schedulers import (
DDIMScheduler,
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
)
from ...utils import PIL_INTERPOLATION, randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
def preprocess(image):
    """Resize to a multiple of 32 and map pixel values from [0, 255] into [-1, 1]."""
    w, h = image.size
    w, h = (x - x % 32 for x in (w, h))  # resize to integer multiple of 32
    image = image.resize((w, h), resample=PIL_INTERPOLATION["lanczos"])
    image = np.array(image).astype(np.float32) / 255.0
    image = image[None].transpose(0, 3, 1, 2)
    image = torch.from_numpy(image)
    return 2.0 * image - 1.0
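# Illustrative sketch (not part of the original file): a quick check of what `preprocess`
# produces. The spatial size becomes a multiple of 32 and values land in [-1, 1].
# The `_img` / `_tensor` names are introduced only for this illustration.
if __name__ == "__main__":
    from PIL import Image as _PILImage

    _img = _PILImage.fromarray(np.zeros((37, 41, 3), dtype=np.uint8))
    _tensor = preprocess(_img)
    assert _tensor.shape[-2] % 32 == 0 and _tensor.shape[-1] % 32 == 0
    assert float(_tensor.min()) >= -1.0 and float(_tensor.max()) <= 1.0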
class lowerCamelCase ( _UpperCamelCase ):
def __init__( self , lowercase__ , lowercase__ , lowercase__ , ):
super().__init__()
self.register_modules(vqvae=lowercase__ , unet=lowercase__ , scheduler=lowercase__)
@torch.no_grad()
def __call__( self , lowercase__ = None , lowercase__ = 1 , lowercase__ = 1_0_0 , lowercase__ = 0.0 , lowercase__ = None , lowercase__ = "pil" , lowercase__ = True , ):
if isinstance(lowercase__ , PIL.Image.Image):
__UpperCAmelCase : Optional[int] = 1
elif isinstance(lowercase__ , torch.Tensor):
__UpperCAmelCase : Tuple = image.shape[0]
else:
raise ValueError(F"`image` has to be of type `PIL.Image.Image` or `torch.Tensor` but is {type(lowercase__)}")
if isinstance(lowercase__ , PIL.Image.Image):
__UpperCAmelCase : List[str] = preprocess(lowercase__)
__UpperCAmelCase , __UpperCAmelCase : Optional[int] = image.shape[-2:]
# in_channels should be 6: 3 for latents, 3 for low resolution image
__UpperCAmelCase : Any = (batch_size, self.unet.config.in_channels // 2, height, width)
__UpperCAmelCase : Union[str, Any] = next(self.unet.parameters()).dtype
__UpperCAmelCase : Dict = randn_tensor(lowercase__ , generator=lowercase__ , device=self.device , dtype=lowercase__)
__UpperCAmelCase : int = image.to(device=self.device , dtype=lowercase__)
# set timesteps and move to the correct device
self.scheduler.set_timesteps(lowercase__ , device=self.device)
__UpperCAmelCase : Optional[int] = self.scheduler.timesteps
# scale the initial noise by the standard deviation required by the scheduler
__UpperCAmelCase : Optional[Any] = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature.
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
__UpperCAmelCase : Optional[Any] = '''eta''' in set(inspect.signature(self.scheduler.step).parameters.keys())
__UpperCAmelCase : Optional[Any] = {}
if accepts_eta:
__UpperCAmelCase : List[str] = eta
for t in self.progress_bar(lowercase__):
# concat latents and low resolution image in the channel dimension.
__UpperCAmelCase : Optional[Any] = torch.cat([latents, image] , dim=1)
__UpperCAmelCase : int = self.scheduler.scale_model_input(lowercase__ , lowercase__)
# predict the noise residual
__UpperCAmelCase : Optional[int] = self.unet(lowercase__ , lowercase__).sample
# compute the previous noisy sample x_t -> x_t-1
__UpperCAmelCase : int = self.scheduler.step(lowercase__ , lowercase__ , lowercase__ , **lowercase__).prev_sample
# decode the image latents with the VQVAE
__UpperCAmelCase : Dict = self.vqvae.decode(lowercase__).sample
__UpperCAmelCase : Optional[Any] = torch.clamp(lowercase__ , -1.0 , 1.0)
__UpperCAmelCase : Optional[int] = image / 2 + 0.5
__UpperCAmelCase : Optional[int] = image.cpu().permute(0 , 2 , 3 , 1).numpy()
if output_type == "pil":
__UpperCAmelCase : List[str] = self.numpy_to_pil(lowercase__)
if not return_dict:
return (image,)
return ImagePipelineOutput(images=lowercase__)
| 462
|
import unittest
from knapsack import greedy_knapsack as kp
class lowerCamelCase ( unittest.TestCase ):
    def test_sorted(self):
        profit = [10, 20, 30, 40, 50, 60]
        weight = [2, 4, 6, 8, 10, 12]
        max_weight = 100
        self.assertEqual(kp.calc_profit(profit, weight, max_weight), 210)

    def test_negative_max_weight(self):
        self.assertRaisesRegex(ValueError, "max_weight must greater than zero.")

    def test_negative_weight_value(self):
        self.assertRaisesRegex(ValueError, "Weight can not be negative.")

    def test_negative_profit_value(self):
        self.assertRaisesRegex(ValueError, "Profit can not be negative.")

    def test_null_max_weight(self):
        self.assertRaisesRegex(ValueError, "max_weight must greater than zero.")

    def test_unequal_list_length(self):
        self.assertRaisesRegex(
            ValueError, "The length of profit and weight must be same.")
if __name__ == "__main__":
unittest.main()
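# Illustrative sketch (not the `knapsack.greedy_knapsack` implementation under test):
# a minimal greedy fractional-knapsack routine that satisfies the sorted test case above.
# `calc_profit_sketch` is a hypothetical name introduced only for this illustration.
def calc_profit_sketch(profit, weight, max_weight):
    if max_weight <= 0:
        raise ValueError("max_weight must greater than zero.")
    if any(p < 0 for p in profit):
        raise ValueError("Profit can not be negative.")
    if any(w < 0 for w in weight):
        raise ValueError("Weight can not be negative.")
    if len(profit) != len(weight):
        raise ValueError("The length of profit and weight must be same.")
    # take items in order of profit/weight ratio, allowing a fractional last item
    items = sorted(zip(profit, weight), key=lambda pw: pw[0] / pw[1], reverse=True)
    total, capacity = 0.0, max_weight
    for p, w in items:
        if w <= capacity:
            total, capacity = total + p, capacity - w
        else:
            total += p * capacity / w
            break
    return total


assert calc_profit_sketch([10, 20, 30, 40, 50, 60], [2, 4, 6, 8, 10, 12], 100) == 210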
| 462
| 1
|
"""simple docstring"""
import os
from pathlib import Path
def load_cuda_kernels():
    from torch.utils.cpp_extension import load

    root = Path(__file__).resolve().parent.parent.parent / "kernels" / "deformable_detr"
    src_files = [
        root / filename
        for filename in [
            "vision.cpp",
            os.path.join("cpu", "ms_deform_attn_cpu.cpp"),
            os.path.join("cuda", "ms_deform_attn_cuda.cu"),
        ]
    ]

    load(
        "MultiScaleDeformableAttention",
        src_files,
        with_cuda=True,
        extra_include_paths=[str(root)],
        extra_cflags=["-DWITH_CUDA=1"],
        extra_cuda_cflags=[
            "-DCUDA_HAS_FP16=1",
            "-D__CUDA_NO_HALF_OPERATORS__",
            "-D__CUDA_NO_HALF_CONVERSIONS__",
            "-D__CUDA_NO_HALF2_OPERATORS__",
        ],
    )

    import MultiScaleDeformableAttention as MSDA

    return MSDA
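# Illustrative usage sketch (not part of the original file): calling the loader
# JIT-compiles the extension, so it needs a working CUDA toolchain plus the
# `kernels/deformable_detr` sources referenced above. Compilation can take a while.
if __name__ == "__main__":
    MSDA = load_cuda_kernels()  # returns the compiled MultiScaleDeformableAttention module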
| 190
|
"""simple docstring"""
def one_pence() -> int:
    return 1

def two_pence(x: int) -> int:
    return 0 if x < 0 else two_pence(x - 2) + one_pence()

def five_pence(x: int) -> int:
    return 0 if x < 0 else five_pence(x - 5) + two_pence(x)

def ten_pence(x: int) -> int:
    return 0 if x < 0 else ten_pence(x - 10) + five_pence(x)

def twenty_pence(x: int) -> int:
    return 0 if x < 0 else twenty_pence(x - 20) + ten_pence(x)

def fifty_pence(x: int) -> int:
    return 0 if x < 0 else fifty_pence(x - 50) + twenty_pence(x)

def one_pound(x: int) -> int:
    return 0 if x < 0 else one_pound(x - 100) + fifty_pence(x)

def two_pound(x: int) -> int:
    return 0 if x < 0 else two_pound(x - 200) + one_pound(x)

def solution(x: int = 200) -> int:
    return two_pound(x)
if __name__ == "__main__":
print(solution(int(input().strip())))
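# Illustrative worked example (not part of the original file): for 5 pence the usable
# coins are {1p, 2p, 5p}, giving exactly four combinations: 5, 2+2+1, 2+1+1+1, 1+1+1+1+1.
assert solution(5) == 4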
| 190
| 1
|
import itertools
import random
import unittest
import numpy as np
from transformers import BatchFeature, SpeechTaFeatureExtractor
from transformers.testing_utils import require_torch
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_torch_available():
import torch
global_rng = random.Random()


def floats_list(shape, scale=1.0, rng=None, name=None):
    """Creates nested lists of random floats with the given 2-D shape."""
    if rng is None:
        rng = global_rng

    values = []
for batch_idx in range(shape[0] ):
values.append([] )
for _ in range(shape[1] ):
values[-1].append(rng.random() * scale )
return values
@require_torch
class SpeechTaFeatureExtractionTester(unittest.TestCase):
'''simple docstring'''
    def __init__(self, parent, batch_size=7, min_seq_length=400, max_seq_length=2000, feature_size=1, padding_value=0.0, sampling_rate=16000, do_normalize=True, num_mel_bins=80, hop_length=16, win_length=64, win_function="hann_window", fmin=80, fmax=7600, mel_floor=1e-10, return_attention_mask=True):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.feature_size = feature_size
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.do_normalize = do_normalize
        self.num_mel_bins = num_mel_bins
        self.hop_length = hop_length
        self.win_length = win_length
        self.win_function = win_function
        self.fmin = fmin
        self.fmax = fmax
        self.mel_floor = mel_floor
        self.return_attention_mask = return_attention_mask
    def prepare_feat_extract_dict(self):
return {
"feature_size": self.feature_size,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"do_normalize": self.do_normalize,
"num_mel_bins": self.num_mel_bins,
"hop_length": self.hop_length,
"win_length": self.win_length,
"win_function": self.win_function,
"fmin": self.fmin,
"fmax": self.fmax,
"mel_floor": self.mel_floor,
"return_attention_mask": self.return_attention_mask,
}
    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))

        if equal_length:
            speech_inputs = floats_list((self.batch_size, self.max_seq_length))
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                _flatten(floats_list((x, self.feature_size)))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]
        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]
        return speech_inputs
    def prepare_inputs_for_target(self, equal_length=False, numpify=False):
        if equal_length:
            speech_inputs = [floats_list((self.max_seq_length, self.num_mel_bins)) for _ in range(self.batch_size)]
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                floats_list((x, self.num_mel_bins))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]
        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]
        return speech_inputs
@require_torch
class SpeechTaFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
'''simple docstring'''
    feature_extraction_class = SpeechTaFeatureExtractor
    def setUp(self):
        self.feat_extract_tester = SpeechTaFeatureExtractionTester(self)
    def _check_zero_mean_unit_variance(self, input_vector):
        self.assertTrue(np.all(np.mean(input_vector, axis=0) < 1e-3))
        self.assertTrue(np.all(np.abs(np.var(input_vector, axis=0) - 1) < 1e-3))
def snake_case__( self : List[str] ) ->List[str]:
# Tests that all call wrap to encode_plus and batch_encode_plus
snake_case_ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
snake_case_ = [floats_list((1, x) )[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0 )]
snake_case_ = [np.asarray(_UpperCamelCase ) for speech_input in speech_inputs]
# Test not batched input
snake_case_ = feat_extract(speech_inputs[0] , return_tensors='''np''' ).input_values
snake_case_ = feat_extract(np_speech_inputs[0] , return_tensors='''np''' ).input_values
self.assertTrue(np.allclose(_UpperCamelCase , _UpperCamelCase , atol=1e-3 ) )
# Test batched
snake_case_ = feat_extract(_UpperCamelCase , return_tensors='''np''' ).input_values
snake_case_ = feat_extract(_UpperCamelCase , return_tensors='''np''' ).input_values
for enc_seq_a, enc_seq_a in zip(_UpperCamelCase , _UpperCamelCase ):
self.assertTrue(np.allclose(_UpperCamelCase , _UpperCamelCase , atol=1e-3 ) )
def snake_case__( self : List[str] ) ->Optional[Any]:
snake_case_ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
snake_case_ = [floats_list((1, x) )[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0 )]
snake_case_ = ['''longest''', '''max_length''', '''do_not_pad''']
snake_case_ = [None, 1_6_0_0, None]
for max_length, padding in zip(_UpperCamelCase , _UpperCamelCase ):
snake_case_ = feat_extract(_UpperCamelCase , padding=_UpperCamelCase , max_length=_UpperCamelCase , return_tensors='''np''' )
snake_case_ = processed.input_values
self._check_zero_mean_unit_variance(input_values[0][:8_0_0] )
self.assertTrue(input_values[0][8_0_0:].sum() < 1e-6 )
self._check_zero_mean_unit_variance(input_values[1][:1_0_0_0] )
self.assertTrue(input_values[0][1_0_0_0:].sum() < 1e-6 )
self._check_zero_mean_unit_variance(input_values[2][:1_2_0_0] )
def snake_case__( self : Any ) ->Tuple:
snake_case_ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
snake_case_ = range(8_0_0 , 1_4_0_0 , 2_0_0 )
snake_case_ = [floats_list((1, x) )[0] for x in lengths]
snake_case_ = ['''longest''', '''max_length''', '''do_not_pad''']
snake_case_ = [None, 1_6_0_0, None]
for max_length, padding in zip(_UpperCamelCase , _UpperCamelCase ):
snake_case_ = feat_extract(_UpperCamelCase , max_length=_UpperCamelCase , padding=_UpperCamelCase )
snake_case_ = processed.input_values
self._check_zero_mean_unit_variance(input_values[0][:8_0_0] )
self._check_zero_mean_unit_variance(input_values[1][:1_0_0_0] )
self._check_zero_mean_unit_variance(input_values[2][:1_2_0_0] )
def snake_case__( self : Optional[int] ) ->Any:
snake_case_ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
snake_case_ = [floats_list((1, x) )[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0 )]
snake_case_ = feat_extract(
_UpperCamelCase , truncation=_UpperCamelCase , max_length=1_0_0_0 , padding='''max_length''' , return_tensors='''np''' )
snake_case_ = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :8_0_0] )
self._check_zero_mean_unit_variance(input_values[1] )
self._check_zero_mean_unit_variance(input_values[2] )
def snake_case__( self : Union[str, Any] ) ->Any:
snake_case_ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
snake_case_ = [floats_list((1, x) )[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0 )]
snake_case_ = feat_extract(
_UpperCamelCase , truncation=_UpperCamelCase , max_length=1_0_0_0 , padding='''longest''' , return_tensors='''np''' )
snake_case_ = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :8_0_0] )
self._check_zero_mean_unit_variance(input_values[1, :1_0_0_0] )
self._check_zero_mean_unit_variance(input_values[2] )
# make sure that if max_length < longest -> then pad to max_length
self.assertTrue(input_values.shape == (3, 1_0_0_0) )
snake_case_ = [floats_list((1, x) )[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0 )]
snake_case_ = feat_extract(
_UpperCamelCase , truncation=_UpperCamelCase , max_length=2_0_0_0 , padding='''longest''' , return_tensors='''np''' )
snake_case_ = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :8_0_0] )
self._check_zero_mean_unit_variance(input_values[1, :1_0_0_0] )
self._check_zero_mean_unit_variance(input_values[2] )
# make sure that if max_length > longest -> then pad to longest
self.assertTrue(input_values.shape == (3, 1_2_0_0) )
def snake_case__( self : Dict ) ->str:
snake_case_ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
snake_case_ = np.random.rand(1_0_0 ).astype(np.floataa )
snake_case_ = np_speech_inputs.tolist()
for inputs in [py_speech_inputs, np_speech_inputs]:
snake_case_ = feature_extractor.pad([{'''input_values''': inputs}] , return_tensors='''np''' )
self.assertTrue(np_processed.input_values.dtype == np.floataa )
snake_case_ = feature_extractor.pad([{'''input_values''': inputs}] , return_tensors='''pt''' )
self.assertTrue(pt_processed.input_values.dtype == torch.floataa )
def snake_case__( self : List[Any] ) ->List[Any]:
# Tests that all call wrap to encode_plus and batch_encode_plus
snake_case_ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
snake_case_ = [floats_list((1, x) )[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0 )]
snake_case_ = [np.asarray(_UpperCamelCase ) for speech_input in speech_inputs]
# Test feature size
snake_case_ = feature_extractor(audio_target=_UpperCamelCase , padding=_UpperCamelCase , return_tensors='''np''' ).input_values
self.assertTrue(input_values.ndim == 3 )
self.assertTrue(input_values.shape[-1] == feature_extractor.num_mel_bins )
# Test not batched input
snake_case_ = feature_extractor(speech_inputs[0] , return_tensors='''np''' ).input_values
snake_case_ = feature_extractor(np_speech_inputs[0] , return_tensors='''np''' ).input_values
self.assertTrue(np.allclose(_UpperCamelCase , _UpperCamelCase , atol=1e-3 ) )
# Test batched
snake_case_ = feature_extractor(_UpperCamelCase , return_tensors='''np''' ).input_values
snake_case_ = feature_extractor(_UpperCamelCase , return_tensors='''np''' ).input_values
for enc_seq_a, enc_seq_a in zip(_UpperCamelCase , _UpperCamelCase ):
self.assertTrue(np.allclose(_UpperCamelCase , _UpperCamelCase , atol=1e-3 ) )
# Test 2-D numpy arrays are batched.
snake_case_ = [floats_list((1, x) )[0] for x in (8_0_0, 8_0_0, 8_0_0)]
snake_case_ = np.asarray(_UpperCamelCase )
snake_case_ = feature_extractor(_UpperCamelCase , return_tensors='''np''' ).input_values
snake_case_ = feature_extractor(_UpperCamelCase , return_tensors='''np''' ).input_values
for enc_seq_a, enc_seq_a in zip(_UpperCamelCase , _UpperCamelCase ):
self.assertTrue(np.allclose(_UpperCamelCase , _UpperCamelCase , atol=1e-3 ) )
def snake_case__( self : Any ) ->Tuple:
snake_case_ = self.feat_extract_tester.prepare_inputs_for_target()
snake_case_ = self.feature_extraction_class(**self.feat_extract_dict )
snake_case_ = feat_extract.model_input_names[0]
snake_case_ = BatchFeature({input_name: speech_inputs} )
self.assertTrue(all(len(_UpperCamelCase ) == len(_UpperCamelCase ) for x, y in zip(_UpperCamelCase , processed_features[input_name] ) ) )
snake_case_ = self.feat_extract_tester.prepare_inputs_for_target(equal_length=_UpperCamelCase )
snake_case_ = BatchFeature({input_name: speech_inputs} , tensor_type='''np''' )
snake_case_ = processed_features[input_name]
if len(batch_features_input.shape ) < 3:
snake_case_ = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.num_mel_bins) )
@require_torch
def snake_case__( self : List[str] ) ->Union[str, Any]:
snake_case_ = self.feat_extract_tester.prepare_inputs_for_target(equal_length=_UpperCamelCase )
snake_case_ = self.feature_extraction_class(**self.feat_extract_dict )
snake_case_ = feat_extract.model_input_names[0]
snake_case_ = BatchFeature({input_name: speech_inputs} , tensor_type='''pt''' )
snake_case_ = processed_features[input_name]
if len(batch_features_input.shape ) < 3:
snake_case_ = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.num_mel_bins) )
@require_torch
def snake_case__( self : Optional[int] ) ->Dict:
snake_case_ = self.feature_extraction_class(**self.feat_extract_dict )
snake_case_ = self.feat_extract_tester.prepare_inputs_for_target()
snake_case_ = feat_extract.model_input_names[0]
snake_case_ = BatchFeature({input_name: speech_inputs} )
snake_case_ = feat_extract.num_mel_bins # hack!
snake_case_ = feat_extract.pad(_UpperCamelCase , padding='''longest''' , return_tensors='''np''' )[input_name]
snake_case_ = feat_extract.pad(_UpperCamelCase , padding='''longest''' , return_tensors='''pt''' )[input_name]
self.assertTrue(abs(input_np.astype(np.floataa ).sum() - input_pt.numpy().astype(np.floataa ).sum() ) < 1e-2 )
def snake_case__( self : Optional[Any] ) ->str:
snake_case_ = self.feat_extract_dict
snake_case_ = True
snake_case_ = self.feature_extraction_class(**_UpperCamelCase )
snake_case_ = self.feat_extract_tester.prepare_inputs_for_target()
snake_case_ = [len(_UpperCamelCase ) for x in speech_inputs]
snake_case_ = feat_extract.model_input_names[0]
snake_case_ = BatchFeature({input_name: speech_inputs} )
snake_case_ = feat_extract.num_mel_bins # hack!
snake_case_ = feat_extract.pad(_UpperCamelCase , padding='''longest''' , return_tensors='''np''' )
self.assertIn('''attention_mask''' , _UpperCamelCase )
self.assertListEqual(list(processed.attention_mask.shape ) , list(processed[input_name].shape[:2] ) )
self.assertListEqual(processed.attention_mask.sum(-1 ).tolist() , _UpperCamelCase )
def snake_case__( self : Tuple ) ->str:
snake_case_ = self.feat_extract_dict
snake_case_ = True
snake_case_ = self.feature_extraction_class(**_UpperCamelCase )
snake_case_ = self.feat_extract_tester.prepare_inputs_for_target()
snake_case_ = [len(_UpperCamelCase ) for x in speech_inputs]
snake_case_ = feat_extract.model_input_names[0]
snake_case_ = BatchFeature({input_name: speech_inputs} )
snake_case_ = min(_UpperCamelCase )
snake_case_ = feat_extract.num_mel_bins # hack!
snake_case_ = feat_extract.pad(
_UpperCamelCase , padding='''max_length''' , max_length=_UpperCamelCase , truncation=_UpperCamelCase , return_tensors='''np''' )
self.assertIn('''attention_mask''' , _UpperCamelCase )
self.assertListEqual(
list(processed_pad.attention_mask.shape ) , [processed_pad[input_name].shape[0], max_length] )
self.assertListEqual(
processed_pad.attention_mask[:, :max_length].sum(-1 ).tolist() , [max_length for x in speech_inputs] )
def snake_case__( self : List[Any] , _UpperCamelCase : Union[str, Any] ) ->Any:
from datasets import load_dataset
snake_case_ = load_dataset('''hf-internal-testing/librispeech_asr_dummy''' , '''clean''' , split='''validation''' )
# automatic decoding with librispeech
snake_case_ = ds.sort('''id''' ).select(range(_UpperCamelCase ) )[:num_samples]['''audio''']
return [x["array"] for x in speech_samples]
def snake_case__( self : List[Any] ) ->List[Any]:
# fmt: off
snake_case_ = torch.tensor(
[2.3804e-03, 2.0752e-03, 1.9836e-03, 2.1057e-03, 1.6174e-03,
3.0518e-04, 9.1553e-05, 3.3569e-04, 9.7656e-04, 1.8311e-03,
2.0142e-03, 2.1057e-03, 1.7395e-03, 4.5776e-04, -3.9673e-04,
4.5776e-04, 1.0071e-03, 9.1553e-05, 4.8828e-04, 1.1597e-03,
7.3242e-04, 9.4604e-04, 1.8005e-03, 1.8311e-03, 8.8501e-04,
4.2725e-04, 4.8828e-04, 7.3242e-04, 1.0986e-03, 2.1057e-03] )
# fmt: on
snake_case_ = self._load_datasamples(1 )
snake_case_ = SpeechTaFeatureExtractor()
snake_case_ = feature_extractor(_UpperCamelCase , return_tensors='''pt''' ).input_values
self.assertEquals(input_values.shape , (1, 9_3_6_8_0) )
self.assertTrue(torch.allclose(input_values[0, :3_0] , _UpperCamelCase , atol=1e-6 ) )
def snake_case__( self : Optional[int] ) ->List[str]:
# fmt: off
snake_case_ = torch.tensor(
[-2.6870, -3.0104, -3.1356, -3.5352, -3.0044, -3.0353, -3.4719, -3.6777,
-3.1520, -2.9435, -2.6553, -2.8795, -2.9944, -2.5921, -3.0279, -3.0386,
-3.0864, -3.1291, -3.2353, -2.7444, -2.6831, -2.7287, -3.1761, -3.1571,
-3.2726, -3.0582, -3.1007, -3.4533, -3.4695, -3.0998] )
# fmt: on
snake_case_ = self._load_datasamples(1 )
snake_case_ = SpeechTaFeatureExtractor()
snake_case_ = feature_extractor(audio_target=_UpperCamelCase , return_tensors='''pt''' ).input_values
self.assertEquals(input_values.shape , (1, 3_6_6, 8_0) )
self.assertTrue(torch.allclose(input_values[0, 0, :3_0] , _UpperCamelCase , atol=1e-4 ) )
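# Illustrative sketch (not part of the original tests): what the zero-mean / unit-variance
# check above amounts to, written out with plain numpy on synthetic data. The `_rng` / `_x`
# names are introduced only for this illustration.
if __name__ == "__main__":
    _rng = np.random.default_rng(0)
    _x = _rng.normal(size=(10_000, 80)).astype(np.float32)
    _x = (_x - _x.mean(axis=0)) / (_x.std(axis=0) + 1e-12)  # per-feature normalization
    assert np.all(np.abs(_x.mean(axis=0)) < 1e-3)
    assert np.all(np.abs(_x.var(axis=0) - 1) < 1e-3)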
| 39
|
import unittest
import torch
from diffusers import DDIMScheduler, DDPMScheduler, UNetaDModel
from diffusers.training_utils import set_seed
from diffusers.utils.testing_utils import slow
SCREAMING_SNAKE_CASE__ = False
class _UpperCAmelCase ( unittest.TestCase ):
def _snake_case ( self : str , UpperCAmelCase : Dict=32):
set_seed(0)
SCREAMING_SNAKE_CASE_ :Dict = UNetaDModel(sample_size=UpperCAmelCase , in_channels=3 , out_channels=3)
SCREAMING_SNAKE_CASE_ :Optional[Any] = torch.optim.SGD(model.parameters() , lr=0.0001)
return model, optimizer
@slow
def _snake_case ( self : str):
SCREAMING_SNAKE_CASE_ :List[str] = "cpu" # ensure full determinism without setting the CUBLAS_WORKSPACE_CONFIG env variable
SCREAMING_SNAKE_CASE_ :Any = DDPMScheduler(
num_train_timesteps=10_00 , beta_start=0.0001 , beta_end=0.02 , beta_schedule="linear" , clip_sample=UpperCAmelCase , )
SCREAMING_SNAKE_CASE_ :str = DDIMScheduler(
num_train_timesteps=10_00 , beta_start=0.0001 , beta_end=0.02 , beta_schedule="linear" , clip_sample=UpperCAmelCase , )
assert ddpm_scheduler.config.num_train_timesteps == ddim_scheduler.config.num_train_timesteps
# shared batches for DDPM and DDIM
set_seed(0)
SCREAMING_SNAKE_CASE_ :Any = [torch.randn((4, 3, 32, 32)).clip(-1 , 1).to(UpperCAmelCase) for _ in range(4)]
SCREAMING_SNAKE_CASE_ :str = [torch.randn((4, 3, 32, 32)).to(UpperCAmelCase) for _ in range(4)]
SCREAMING_SNAKE_CASE_ :List[Any] = [torch.randint(0 , 10_00 , (4,)).long().to(UpperCAmelCase) for _ in range(4)]
# train with a DDPM scheduler
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ :Tuple = self.get_model_optimizer(resolution=32)
model.train().to(UpperCAmelCase)
for i in range(4):
optimizer.zero_grad()
SCREAMING_SNAKE_CASE_ :List[Any] = ddpm_scheduler.add_noise(clean_images[i] , noise[i] , timesteps[i])
SCREAMING_SNAKE_CASE_ :Optional[Any] = model(UpperCAmelCase , timesteps[i]).sample
SCREAMING_SNAKE_CASE_ :List[Any] = torch.nn.functional.mse_loss(UpperCAmelCase , noise[i])
loss.backward()
optimizer.step()
del model, optimizer
# recreate the model and optimizer, and retry with DDIM
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ :Optional[Any] = self.get_model_optimizer(resolution=32)
model.train().to(UpperCAmelCase)
for i in range(4):
optimizer.zero_grad()
SCREAMING_SNAKE_CASE_ :List[str] = ddim_scheduler.add_noise(clean_images[i] , noise[i] , timesteps[i])
SCREAMING_SNAKE_CASE_ :Tuple = model(UpperCAmelCase , timesteps[i]).sample
SCREAMING_SNAKE_CASE_ :Optional[Any] = torch.nn.functional.mse_loss(UpperCAmelCase , noise[i])
loss.backward()
optimizer.step()
del model, optimizer
self.assertTrue(torch.allclose(UpperCAmelCase , UpperCAmelCase , atol=1E-5))
self.assertTrue(torch.allclose(UpperCAmelCase , UpperCAmelCase , atol=1E-5))
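# Illustrative background sketch (not part of the original test): the equality checked
# above relies on DDPM and DDIM sharing the same closed-form forward (noising) process,
#   x_t = sqrt(alpha_bar_t) * x_0 + sqrt(1 - alpha_bar_t) * eps,
# so `add_noise` gives identical noisy samples for identical inputs, assuming the two
# schedulers are built with matching beta schedules (the library defaults are assumed
# to match here).
if __name__ == "__main__":
    _x0 = torch.randn(1, 3, 8, 8)
    _eps = torch.randn_like(_x0)
    _t = torch.tensor([10])
    _ddpm = DDPMScheduler(num_train_timesteps=1000)
    _ddim = DDIMScheduler(num_train_timesteps=1000)
    assert torch.allclose(_ddpm.add_noise(_x0, _eps, _t), _ddim.add_noise(_x0, _eps, _t), atol=1e-6)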
| 631
| 0
|
"""simple docstring"""
from typing import List, Optional, Union
import torch
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
a : int = logging.get_logger(__name__) # pylint: disable=invalid-name
a : Optional[int] = '''
Examples:
```py
>>> from diffusers import KandinskyV22Pipeline, KandinskyV22PriorPipeline
>>> import torch
>>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained("kandinsky-community/kandinsky-2-2-prior")
>>> pipe_prior.to("cuda")
>>> prompt = "red cat, 4k photo"
>>> out = pipe_prior(prompt)
>>> image_emb = out.image_embeds
>>> zero_image_emb = out.negative_image_embeds
>>> pipe = KandinskyV22Pipeline.from_pretrained("kandinsky-community/kandinsky-2-2-decoder")
>>> pipe.to("cuda")
>>> image = pipe(
... image_embeds=image_emb,
... negative_image_embeds=zero_image_emb,
... height=768,
... width=768,
... num_inference_steps=50,
... ).images
>>> image[0].save("cat.png")
```
'''
def downscale_height_and_width(height, width, scale_factor=8):
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        new_height += 1
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor
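# Illustrative worked example (not part of the original file): with the default scale
# factor of 8, a 768x768 request maps to 96x96, i.e. a latent-compatible multiple of 8.
assert downscale_height_and_width(768, 768) == (96, 96)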
class __UpperCamelCase ( a__ ):
def __init__( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , ) -> Tuple:
super().__init__()
self.register_modules(
unet=lowerCAmelCase__ , scheduler=lowerCAmelCase__ , movq=lowerCAmelCase__ , )
        self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels) - 1)
def __a ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> Optional[int]:
if latents is None:
a : List[Any] = randn_tensor(lowerCAmelCase__ , generator=lowerCAmelCase__ , device=lowerCAmelCase__ , dtype=lowerCAmelCase__ )
else:
if latents.shape != shape:
raise ValueError(f"""Unexpected latents shape, got {latents.shape}, expected {shape}""" )
a : str = latents.to(lowerCAmelCase__ )
a : List[Any] = latents * scheduler.init_noise_sigma
return latents
def __a ( self , lowerCAmelCase__=0 ) -> List[str]:
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError("Please install accelerate via `pip install accelerate`" )
a : Optional[int] = torch.device(f"""cuda:{gpu_id}""" )
a : str = [
self.unet,
self.movq,
]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(lowerCAmelCase__ , lowerCAmelCase__ )
def __a ( self , lowerCAmelCase__=0 ) -> Tuple:
if is_accelerate_available() and is_accelerate_version(">=" , "0.17.0.dev0" ):
from accelerate import cpu_offload_with_hook
else:
raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher." )
a : Union[str, Any] = torch.device(f"""cuda:{gpu_id}""" )
if self.device.type != "cpu":
self.to("cpu" , silence_dtype_warnings=lowerCAmelCase__ )
torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
a : List[Any] = None
for cpu_offloaded_model in [self.unet, self.movq]:
a, a : Any = cpu_offload_with_hook(lowerCAmelCase__ , lowerCAmelCase__ , prev_module_hook=lowerCAmelCase__ )
# We'll offload the last model manually.
a : Union[str, Any] = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def __a ( self ) -> Optional[int]:
if not hasattr(self.unet , "_hf_hook" ):
return self.device
for module in self.unet.modules():
if (
hasattr(lowerCAmelCase__ , "_hf_hook" )
and hasattr(module._hf_hook , "execution_device" )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
@replace_example_docstring(lowerCAmelCase__ )
def __call__( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = 512 , lowerCAmelCase__ = 512 , lowerCAmelCase__ = 100 , lowerCAmelCase__ = 4.0 , lowerCAmelCase__ = 1 , lowerCAmelCase__ = None , lowerCAmelCase__ = None , lowerCAmelCase__ = "pil" , lowerCAmelCase__ = True , ) -> List[str]:
a : Any = self._execution_device
a : List[str] = guidance_scale > 1.0
if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ):
a : str = torch.cat(lowerCAmelCase__ , dim=0 )
a : List[Any] = image_embeds.shape[0] * num_images_per_prompt
if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ):
a : List[str] = torch.cat(lowerCAmelCase__ , dim=0 )
if do_classifier_free_guidance:
a : Optional[int] = image_embeds.repeat_interleave(lowerCAmelCase__ , dim=0 )
a : Any = negative_image_embeds.repeat_interleave(lowerCAmelCase__ , dim=0 )
a : int = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(dtype=self.unet.dtype , device=lowerCAmelCase__ )
self.scheduler.set_timesteps(lowerCAmelCase__ , device=lowerCAmelCase__ )
a : Any = self.scheduler.timesteps
a : Any = self.unet.config.in_channels
a, a : Dict = downscale_height_and_width(lowerCAmelCase__ , lowerCAmelCase__ , self.movq_scale_factor )
# create initial latent
a : Union[str, Any] = self.prepare_latents(
(batch_size, num_channels_latents, height, width) , image_embeds.dtype , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , self.scheduler , )
for i, t in enumerate(self.progress_bar(lowerCAmelCase__ ) ):
# expand the latents if we are doing classifier free guidance
a : Optional[Any] = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
a : Any = {"image_embeds": image_embeds}
a : List[Any] = self.unet(
sample=lowerCAmelCase__ , timestep=lowerCAmelCase__ , encoder_hidden_states=lowerCAmelCase__ , added_cond_kwargs=lowerCAmelCase__ , return_dict=lowerCAmelCase__ , )[0]
if do_classifier_free_guidance:
a, a : Tuple = noise_pred.split(latents.shape[1] , dim=1 )
a, a : str = noise_pred.chunk(2 )
a, a : str = variance_pred.chunk(2 )
a : List[str] = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
a : Dict = torch.cat([noise_pred, variance_pred_text] , dim=1 )
if not (
hasattr(self.scheduler.config , "variance_type" )
and self.scheduler.config.variance_type in ["learned", "learned_range"]
):
a, a : Any = noise_pred.split(latents.shape[1] , dim=1 )
# compute the previous noisy sample x_t -> x_t-1
a : Tuple = self.scheduler.step(
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , generator=lowerCAmelCase__ , )[0]
# post-processing
a : Union[str, Any] = self.movq.decode(lowerCAmelCase__ , force_not_quantize=lowerCAmelCase__ )["sample"]
if output_type not in ["pt", "np", "pil"]:
raise ValueError(f"""Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}""" )
if output_type in ["np", "pil"]:
a : Tuple = image * 0.5 + 0.5
a : int = image.clamp(0 , 1 )
a : Tuple = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
a : Optional[int] = self.numpy_to_pil(lowerCAmelCase__ )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=lowerCAmelCase__ )
| 31
|
"""simple docstring"""
def solution(pence: int = 200) -> int:
    """Count the ways to make `pence` using the standard UK coin denominations."""
    coins = [1, 2, 5, 10, 20, 50, 100, 200]
    number_of_ways = [0] * (pence + 1)
    number_of_ways[0] = 1  # base case: 1 way to make 0 pence
    for coin in coins:
        for i in range(coin, pence + 1, 1):
            number_of_ways[i] += number_of_ways[i - coin]
    return number_of_ways[pence]
if __name__ == "__main__":
assert solution(200) == 73682
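# Illustrative worked example (not part of the original file): the DP above agrees with
# the recursive Problem 31 solution earlier in this collection, e.g. there are exactly
# 4 ways to make 5 pence (5, 2+2+1, 2+1+1+1, 1+1+1+1+1).
assert solution(5) == 4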
| 31
| 1
|
"""simple docstring"""
import unittest
import torch
from torch import nn
from accelerate.test_utils import require_cuda
from accelerate.utils.memory import find_executable_batch_size, release_memory
def __snake_case ( ):
"""simple docstring"""
raise RuntimeError('''CUDA out of memory.''' )
class ModelForTest(nn.Module):
    """Tiny model used by the memory tests below."""

    def __init__(self):
        super().__init__()
        self.linear1 = nn.Linear(3, 4)
        self.batchnorm = nn.BatchNorm1d(4)
        self.linear2 = nn.Linear(4, 5)

    def forward(self, x):
        return self.linear2(self.batchnorm(self.linear1(x)))
class MemoryTest(unittest.TestCase):
"""simple docstring"""
def UpperCAmelCase__ ( self) -> List[Any]:
UpperCamelCase = []
@find_executable_batch_size(starting_batch_size=1_2_8)
        def mock_training_loop_function(batch_size):
            nonlocal batch_sizes
            batch_sizes.append(batch_size)
            if batch_size != 8:
                raise_fake_out_of_memory()

        mock_training_loop_function()
        self.assertListEqual(batch_sizes, [128, 64, 32, 16, 8])
def UpperCAmelCase__ ( self) -> Optional[Any]:
UpperCamelCase = []
@find_executable_batch_size(starting_batch_size=1_2_8)
        def mock_training_loop_function(batch_size, arga):
            nonlocal batch_sizes
            batch_sizes.append(batch_size)
            if batch_size != 8:
                raise_fake_out_of_memory()
            return batch_size, arga

        bs, arga = mock_training_loop_function("hello")
        self.assertListEqual(batch_sizes, [128, 64, 32, 16, 8])
self.assertListEqual([bs, arga] , [8, '''hello'''])
def UpperCAmelCase__ ( self) -> Tuple:
@find_executable_batch_size(starting_batch_size=0)
def mock_training_loop_function(lowerCamelCase_):
pass
        with self.assertRaises(RuntimeError) as cm:
mock_training_loop_function()
self.assertIn('''No executable batch size found, reached zero.''' , cm.exception.args[0])
def UpperCAmelCase__ ( self) -> List[Any]:
@find_executable_batch_size(starting_batch_size=1_6)
        def mock_training_loop_function(batch_size):
            if batch_size > 0:
                raise_fake_out_of_memory()
            pass

        with self.assertRaises(RuntimeError) as cm:
mock_training_loop_function()
self.assertIn('''No executable batch size found, reached zero.''' , cm.exception.args[0])
def UpperCAmelCase__ ( self) -> Union[str, Any]:
@find_executable_batch_size(starting_batch_size=1_2_8)
        def mock_training_loop_function(batch_size, arg1, arg2):
if batch_size != 8:
raise raise_fake_out_of_memory()
        with self.assertRaises(TypeError) as cm:
mock_training_loop_function(1_2_8 , '''hello''' , '''world''')
self.assertIn('''Batch size was passed into `f`''' , cm.exception.args[0])
self.assertIn('''`f(arg1=\'hello\', arg2=\'world\')''' , cm.exception.args[0])
def UpperCAmelCase__ ( self) -> Dict:
@find_executable_batch_size(starting_batch_size=1_6)
def mock_training_loop_function(lowerCamelCase_):
raise ValueError('''Oops, we had an error!''')
        with self.assertRaises(ValueError) as cm:
mock_training_loop_function()
self.assertIn('''Oops, we had an error!''' , cm.exception.args[0])
@require_cuda
def UpperCAmelCase__ ( self) -> Optional[int]:
        starting_memory = torch.cuda.memory_allocated()
        model = ModelForTest()
        model.cuda()
        self.assertGreater(torch.cuda.memory_allocated(), starting_memory)
        model = release_memory(model)
        self.assertEqual(torch.cuda.memory_allocated(), starting_memory)
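# Illustrative conceptual sketch (not the accelerate implementation): the decorator
# exercised above retries the wrapped function, halving the batch size whenever an
# out-of-memory error is raised, until it either succeeds or reaches zero.
def _find_executable_batch_size_sketch(function, starting_batch_size=128):
    def wrapper(*args, **kwargs):
        batch_size = starting_batch_size
        while True:
            if batch_size == 0:
                raise RuntimeError("No executable batch size found, reached zero.")
            try:
                return function(batch_size, *args, **kwargs)
            except RuntimeError as error:
                if "out of memory" in str(error).lower():
                    batch_size //= 2  # shrink and retry
                else:
                    raise
    return wrapper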
| 34
|
import unittest
from transformers import (
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
Pipeline,
ZeroShotClassificationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow
from .test_pipelines_common import ANY
# These 2 model types require different inputs than those of the usual text models.
lowerCamelCase : Optional[int] = {'LayoutLMv2Config', 'LayoutLMv3Config'}
@is_pipeline_test
class snake_case__ ( unittest.TestCase ):
_lowerCAmelCase =MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
_lowerCAmelCase =TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
if model_mapping is not None:
_lowerCAmelCase ={config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP}
if tf_model_mapping is not None:
_lowerCAmelCase ={
config: model for config, model in tf_model_mapping.items() if config.__name__ not in _TO_SKIP
}
def UpperCAmelCase__ ( self : List[Any] , _lowerCamelCase : Any , _lowerCamelCase : Tuple , _lowerCamelCase : List[Any] ):
snake_case__ : List[str] = ZeroShotClassificationPipeline(
model=_lowerCamelCase , tokenizer=_lowerCamelCase , candidate_labels=['polics', 'health'] )
return classifier, ["Who are you voting for in 2020?", "My stomach hurts."]
def UpperCAmelCase__ ( self : Any , _lowerCamelCase : Optional[int] , _lowerCamelCase : Optional[int] ):
snake_case__ : Any = classifier('Who are you voting for in 2020?' , candidate_labels='politics' )
self.assertEqual(_lowerCamelCase , {'sequence': ANY(_lowerCamelCase ), 'labels': [ANY(_lowerCamelCase )], 'scores': [ANY(_lowerCamelCase )]} )
# No kwarg
snake_case__ : Any = classifier('Who are you voting for in 2020?' , ['politics'] )
self.assertEqual(_lowerCamelCase , {'sequence': ANY(_lowerCamelCase ), 'labels': [ANY(_lowerCamelCase )], 'scores': [ANY(_lowerCamelCase )]} )
snake_case__ : str = classifier('Who are you voting for in 2020?' , candidate_labels=['politics'] )
self.assertEqual(_lowerCamelCase , {'sequence': ANY(_lowerCamelCase ), 'labels': [ANY(_lowerCamelCase )], 'scores': [ANY(_lowerCamelCase )]} )
snake_case__ : Any = classifier('Who are you voting for in 2020?' , candidate_labels='politics, public health' )
self.assertEqual(
_lowerCamelCase , {'sequence': ANY(_lowerCamelCase ), 'labels': [ANY(_lowerCamelCase ), ANY(_lowerCamelCase )], 'scores': [ANY(_lowerCamelCase ), ANY(_lowerCamelCase )]} )
self.assertAlmostEqual(sum(nested_simplify(outputs['scores'] ) ) , 1.0 )
snake_case__ : Optional[int] = classifier('Who are you voting for in 2020?' , candidate_labels=['politics', 'public health'] )
self.assertEqual(
_lowerCamelCase , {'sequence': ANY(_lowerCamelCase ), 'labels': [ANY(_lowerCamelCase ), ANY(_lowerCamelCase )], 'scores': [ANY(_lowerCamelCase ), ANY(_lowerCamelCase )]} )
self.assertAlmostEqual(sum(nested_simplify(outputs['scores'] ) ) , 1.0 )
snake_case__ : Optional[Any] = classifier(
'Who are you voting for in 2020?' , candidate_labels='politics' , hypothesis_template='This text is about {}' )
self.assertEqual(_lowerCamelCase , {'sequence': ANY(_lowerCamelCase ), 'labels': [ANY(_lowerCamelCase )], 'scores': [ANY(_lowerCamelCase )]} )
# https://github.com/huggingface/transformers/issues/13846
snake_case__ : List[str] = classifier(['I am happy'] , ['positive', 'negative'] )
self.assertEqual(
_lowerCamelCase , [
{'sequence': ANY(_lowerCamelCase ), 'labels': [ANY(_lowerCamelCase ), ANY(_lowerCamelCase )], 'scores': [ANY(_lowerCamelCase ), ANY(_lowerCamelCase )]}
for i in range(1 )
] , )
snake_case__ : int = classifier(['I am happy', 'I am sad'] , ['positive', 'negative'] )
self.assertEqual(
_lowerCamelCase , [
{'sequence': ANY(_lowerCamelCase ), 'labels': [ANY(_lowerCamelCase ), ANY(_lowerCamelCase )], 'scores': [ANY(_lowerCamelCase ), ANY(_lowerCamelCase )]}
for i in range(2 )
] , )
with self.assertRaises(_lowerCamelCase ):
classifier('' , candidate_labels='politics' )
with self.assertRaises(_lowerCamelCase ):
classifier(_lowerCamelCase , candidate_labels='politics' )
with self.assertRaises(_lowerCamelCase ):
classifier('Who are you voting for in 2020?' , candidate_labels='' )
with self.assertRaises(_lowerCamelCase ):
classifier('Who are you voting for in 2020?' , candidate_labels=_lowerCamelCase )
with self.assertRaises(_lowerCamelCase ):
classifier(
'Who are you voting for in 2020?' , candidate_labels='politics' , hypothesis_template='Not formatting template' , )
with self.assertRaises(_lowerCamelCase ):
classifier(
'Who are you voting for in 2020?' , candidate_labels='politics' , hypothesis_template=_lowerCamelCase , )
self.run_entailment_id(_lowerCamelCase )
def UpperCAmelCase__ ( self : Any , _lowerCamelCase : Pipeline ):
snake_case__ : List[str] = zero_shot_classifier.model.config
snake_case__ : Union[str, Any] = config.labelaid
snake_case__ : Dict = zero_shot_classifier.entailment_id
snake_case__ : List[str] = {'LABEL_0': 0, 'LABEL_1': 1, 'LABEL_2': 2}
self.assertEqual(zero_shot_classifier.entailment_id , -1 )
snake_case__ : List[Any] = {'entailment': 0, 'neutral': 1, 'contradiction': 2}
self.assertEqual(zero_shot_classifier.entailment_id , 0 )
snake_case__ : Dict = {'ENTAIL': 0, 'NON-ENTAIL': 1}
self.assertEqual(zero_shot_classifier.entailment_id , 0 )
snake_case__ : List[str] = {'ENTAIL': 2, 'NEUTRAL': 1, 'CONTR': 0}
self.assertEqual(zero_shot_classifier.entailment_id , 2 )
snake_case__ : Optional[int] = original_labelaid
self.assertEqual(_lowerCamelCase , zero_shot_classifier.entailment_id )
@require_torch
def UpperCAmelCase__ ( self : int ):
snake_case__ : List[str] = pipeline(
'zero-shot-classification' , model='sshleifer/tiny-distilbert-base-cased-distilled-squad' , framework='pt' , )
# There was a regression in 4.10 for this
# Adding a test so we don't make the mistake again.
# https://github.com/huggingface/transformers/issues/13381#issuecomment-912343499
zero_shot_classifier(
'Who are you voting for in 2020?' * 1_0_0 , candidate_labels=['politics', 'public health', 'science'] )
@require_torch
def UpperCAmelCase__ ( self : Optional[int] ):
snake_case__ : List[Any] = pipeline(
'zero-shot-classification' , model='sshleifer/tiny-distilbert-base-cased-distilled-squad' , framework='pt' , )
snake_case__ : Optional[Any] = zero_shot_classifier(
'Who are you voting for in 2020?' , candidate_labels=['politics', 'public health', 'science'] )
self.assertEqual(
nested_simplify(_lowerCamelCase ) , {
'sequence': 'Who are you voting for in 2020?',
'labels': ['science', 'public health', 'politics'],
'scores': [0.333, 0.333, 0.333],
} , )
@require_tf
def UpperCAmelCase__ ( self : Optional[Any] ):
snake_case__ : str = pipeline(
'zero-shot-classification' , model='sshleifer/tiny-distilbert-base-cased-distilled-squad' , framework='tf' , )
snake_case__ : str = zero_shot_classifier(
'Who are you voting for in 2020?' , candidate_labels=['politics', 'public health', 'science'] )
self.assertEqual(
nested_simplify(_lowerCamelCase ) , {
'sequence': 'Who are you voting for in 2020?',
'labels': ['science', 'public health', 'politics'],
'scores': [0.333, 0.333, 0.333],
} , )
@slow
@require_torch
def UpperCAmelCase__ ( self : int ):
snake_case__ : Optional[Any] = pipeline('zero-shot-classification' , model='roberta-large-mnli' , framework='pt' )
snake_case__ : int = zero_shot_classifier(
'Who are you voting for in 2020?' , candidate_labels=['politics', 'public health', 'science'] )
self.assertEqual(
nested_simplify(_lowerCamelCase ) , {
'sequence': 'Who are you voting for in 2020?',
'labels': ['politics', 'public health', 'science'],
'scores': [0.976, 0.015, 0.009],
} , )
snake_case__ : Union[str, Any] = zero_shot_classifier(
'The dominant sequence transduction models are based on complex recurrent or convolutional neural networks'
' in an encoder-decoder configuration. The best performing models also connect the encoder and decoder'
' through an attention mechanism. We propose a new simple network architecture, the Transformer, based'
' solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two'
' machine translation tasks show these models to be superior in quality while being more parallelizable'
' and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014'
' English-to-German translation task, improving over the existing best results, including ensembles by'
' over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new'
' single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small'
' fraction of the training costs of the best models from the literature. We show that the Transformer'
' generalizes well to other tasks by applying it successfully to English constituency parsing both with'
' large and limited training data.' , candidate_labels=['machine learning', 'statistics', 'translation', 'vision'] , multi_label=_lowerCamelCase , )
self.assertEqual(
nested_simplify(_lowerCamelCase ) , {
'sequence': (
'The dominant sequence transduction models are based on complex recurrent or convolutional neural'
' networks in an encoder-decoder configuration. The best performing models also connect the'
' encoder and decoder through an attention mechanism. We propose a new simple network'
' architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence'
' and convolutions entirely. Experiments on two machine translation tasks show these models to be'
' superior in quality while being more parallelizable and requiring significantly less time to'
' train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task,'
' improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014'
' English-to-French translation task, our model establishes a new single-model state-of-the-art'
' BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training'
' costs of the best models from the literature. We show that the Transformer generalizes well to'
' other tasks by applying it successfully to English constituency parsing both with large and'
' limited training data.'
),
'labels': ['translation', 'machine learning', 'vision', 'statistics'],
'scores': [0.817, 0.713, 0.018, 0.018],
} , )
@slow
@require_tf
def UpperCAmelCase__ ( self : List[Any] ):
snake_case__ : List[Any] = pipeline('zero-shot-classification' , model='roberta-large-mnli' , framework='tf' )
snake_case__ : int = zero_shot_classifier(
'Who are you voting for in 2020?' , candidate_labels=['politics', 'public health', 'science'] )
self.assertEqual(
nested_simplify(_lowerCamelCase ) , {
'sequence': 'Who are you voting for in 2020?',
'labels': ['politics', 'public health', 'science'],
'scores': [0.976, 0.015, 0.009],
} , )
snake_case__ : List[str] = zero_shot_classifier(
'The dominant sequence transduction models are based on complex recurrent or convolutional neural networks'
' in an encoder-decoder configuration. The best performing models also connect the encoder and decoder'
' through an attention mechanism. We propose a new simple network architecture, the Transformer, based'
' solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two'
' machine translation tasks show these models to be superior in quality while being more parallelizable'
' and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014'
' English-to-German translation task, improving over the existing best results, including ensembles by'
' over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new'
' single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small'
' fraction of the training costs of the best models from the literature. We show that the Transformer'
' generalizes well to other tasks by applying it successfully to English constituency parsing both with'
' large and limited training data.' , candidate_labels=['machine learning', 'statistics', 'translation', 'vision'] , multi_label=_lowerCamelCase , )
self.assertEqual(
nested_simplify(_lowerCamelCase ) , {
'sequence': (
'The dominant sequence transduction models are based on complex recurrent or convolutional neural'
' networks in an encoder-decoder configuration. The best performing models also connect the'
' encoder and decoder through an attention mechanism. We propose a new simple network'
' architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence'
' and convolutions entirely. Experiments on two machine translation tasks show these models to be'
' superior in quality while being more parallelizable and requiring significantly less time to'
' train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task,'
' improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014'
' English-to-French translation task, our model establishes a new single-model state-of-the-art'
' BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training'
' costs of the best models from the literature. We show that the Transformer generalizes well to'
' other tasks by applying it successfully to English constituency parsing both with large and'
' limited training data.'
),
'labels': ['translation', 'machine learning', 'vision', 'statistics'],
'scores': [0.817, 0.713, 0.018, 0.018],
} , )
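# Illustrative usage sketch (not part of the original tests): the zero-shot pipeline
# exercised above turns each candidate label into an NLI hypothesis via the template and
# scores it for entailment against the input sequence. Running this downloads the
# roberta-large-mnli checkpoint, so it is guarded.
if __name__ == "__main__":
    _classifier = pipeline("zero-shot-classification", model="roberta-large-mnli")
    _result = _classifier(
        "Who are you voting for in 2020?",
        candidate_labels=["politics", "public health", "science"],
        hypothesis_template="This text is about {}",
    )
    print(_result["labels"][0], _result["scores"][0])  # highest-scoring label first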
| 170
| 0
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"configuration_electra": ["ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP", "ElectraConfig", "ElectraOnnxConfig"],
"tokenization_electra": ["ElectraTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_electra_fast"] = ["ElectraTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_electra"] = [
"ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST",
"ElectraForCausalLM",
"ElectraForMaskedLM",
"ElectraForMultipleChoice",
"ElectraForPreTraining",
"ElectraForQuestionAnswering",
"ElectraForSequenceClassification",
"ElectraForTokenClassification",
"ElectraModel",
"ElectraPreTrainedModel",
"load_tf_weights_in_electra",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_electra"] = [
"TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFElectraForMaskedLM",
"TFElectraForMultipleChoice",
"TFElectraForPreTraining",
"TFElectraForQuestionAnswering",
"TFElectraForSequenceClassification",
"TFElectraForTokenClassification",
"TFElectraModel",
"TFElectraPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_electra"] = [
"FlaxElectraForCausalLM",
"FlaxElectraForMaskedLM",
"FlaxElectraForMultipleChoice",
"FlaxElectraForPreTraining",
"FlaxElectraForQuestionAnswering",
"FlaxElectraForSequenceClassification",
"FlaxElectraForTokenClassification",
"FlaxElectraModel",
"FlaxElectraPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_electra import ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP, ElectraConfig, ElectraOnnxConfig
from .tokenization_electra import ElectraTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_electra_fast import ElectraTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_electra import (
ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
ElectraForCausalLM,
ElectraForMaskedLM,
ElectraForMultipleChoice,
ElectraForPreTraining,
ElectraForQuestionAnswering,
ElectraForSequenceClassification,
ElectraForTokenClassification,
ElectraModel,
ElectraPreTrainedModel,
load_tf_weights_in_electra,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_electra import (
TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFElectraForMaskedLM,
TFElectraForMultipleChoice,
TFElectraForPreTraining,
TFElectraForQuestionAnswering,
TFElectraForSequenceClassification,
TFElectraForTokenClassification,
TFElectraModel,
TFElectraPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_electra import (
FlaxElectraForCausalLM,
FlaxElectraForMaskedLM,
FlaxElectraForMultipleChoice,
FlaxElectraForPreTraining,
FlaxElectraForQuestionAnswering,
FlaxElectraForSequenceClassification,
FlaxElectraForTokenClassification,
FlaxElectraModel,
FlaxElectraPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 703
|
"""simple docstring"""
import contextlib
from multiprocessing import Pool, RLock
from tqdm.auto import tqdm
from ..utils import experimental, logging
__a = logging.get_logger(__name__)
class lowerCamelCase :
'''simple docstring'''
_A : Union[str, Any] = None
@experimental
def A_ ( _lowercase, _lowercase, _lowercase, _lowercase, _lowercase, _lowercase, _lowercase ):
'''simple docstring'''
if ParallelBackendConfig.backend_name is None:
return _map_with_multiprocessing_pool(
_lowercase, _lowercase, _lowercase, _lowercase, _lowercase, _lowercase, _lowercase )
return _map_with_joblib(_lowercase, _lowercase, _lowercase, _lowercase, _lowercase, _lowercase, _lowercase )
def A_ ( _lowercase, _lowercase, _lowercase, _lowercase, _lowercase, _lowercase, _lowercase ):
'''simple docstring'''
snake_case_ :List[str] = num_proc if num_proc <= len(_lowercase ) else len(_lowercase )
snake_case_ :int = [] # We organize the splits ourselve (contiguous splits)
for index in range(_lowercase ):
snake_case_ :List[str] = len(_lowercase ) // num_proc
snake_case_ :Any = len(_lowercase ) % num_proc
snake_case_ :Optional[int] = div * index + min(_lowercase, _lowercase )
snake_case_ :Union[str, Any] = start + div + (1 if index < mod else 0)
split_kwds.append((function, iterable[start:end], types, index, disable_tqdm, desc) )
if len(_lowercase ) != sum(len(i[1] ) for i in split_kwds ):
raise ValueError(
f"""Error dividing inputs iterable among processes. """
f"""Total number of objects {len(_lowercase )}, """
f"""length: {sum(len(i[1] ) for i in split_kwds )}""" )
logger.info(
f"""Spawning {num_proc} processes for {len(_lowercase )} objects in slices of {[len(i[1] ) for i in split_kwds]}""" )
snake_case_, snake_case_ :Optional[int] = None, None
if not disable_tqdm:
snake_case_, snake_case_ :List[str] = (RLock(),), tqdm.set_lock
with Pool(_lowercase, initargs=_lowercase, initializer=_lowercase ) as pool:
snake_case_ :Optional[Any] = pool.map(_lowercase, _lowercase )
logger.info(f"""Finished {num_proc} processes""" )
snake_case_ :Optional[int] = [obj for proc_res in mapped for obj in proc_res]
logger.info(f"""Unpacked {len(_lowercase )} objects""" )
return mapped
def A_ ( _lowercase, _lowercase, _lowercase, _lowercase, _lowercase, _lowercase, _lowercase ):
'''simple docstring'''
import joblib
with joblib.parallel_backend(ParallelBackendConfig.backend_name, n_jobs=_lowercase ):
return joblib.Parallel()(
joblib.delayed(_lowercase )((function, obj, types, None, True, None) ) for obj in iterable )
@experimental
@contextlib.contextmanager
def parallel_backend(backend_name):
    '''simple docstring'''
    ParallelBackendConfig.backend_name = backend_name
    if backend_name == "spark":
        from joblibspark import register_spark
        register_spark()
    # TODO: call create_cache_and_write_probe if "download" in steps
    # TODO: raise NotImplementedError when Dataset.map etc is called
    try:
        yield
    finally:
        ParallelBackendConfig.backend_name = None
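# Illustrative usage sketch, not part of the original module. The context manager above routes
# work that would normally use multiprocessing through joblib; the "spark" backend assumes the
# optional `joblibspark` package is installed, and the `load_dataset` call below is a
# hypothetical example of a caller that benefits from it:
#
#     from datasets import load_dataset
#     from datasets.parallel import parallel_backend
#
#     with parallel_backend("spark"):
#         ds = load_dataset("some/dataset", num_proc=8)  # hypothetical dataset id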
| 310
| 0
|
"""simple docstring"""
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMInverseScheduler,
DDIMScheduler,
DPMSolverMultistepInverseScheduler,
DPMSolverMultistepScheduler,
StableDiffusionDiffEditPipeline,
    UNet2DConditionModel,
)
from diffusers.utils import load_image, slow
from diffusers.utils.testing_utils import enable_full_determinism, floats_tensor, require_torch_gpu, torch_device
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class StableDiffusionDiffEditPipelineFastTests(PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase):
    pipeline_class = StableDiffusionDiffEditPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"""height""", """width""", """image"""} | {"""image_latents"""}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS - {"""image"""} | {"""image_latents"""}
    image_params = frozenset(
        [] )  # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    image_latents_params = frozenset([] )
def __snake_case ( self ):
torch.manual_seed(0 )
        UpperCAmelCase__ : Optional[Any] = UNet2DConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=32 , attention_head_dim=(2, 4) , use_linear_projection=UpperCamelCase_ , )
UpperCAmelCase__ : List[Any] = DDIMScheduler(
beta_start=0.00085 , beta_end=0.012 , beta_schedule='scaled_linear' , clip_sample=UpperCamelCase_ , set_alpha_to_one=UpperCamelCase_ , )
UpperCAmelCase__ : List[Any] = DDIMInverseScheduler(
beta_start=0.00085 , beta_end=0.012 , beta_schedule='scaled_linear' , clip_sample=UpperCamelCase_ , set_alpha_to_zero=UpperCamelCase_ , )
torch.manual_seed(0 )
UpperCAmelCase__ : Optional[int] = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , sample_size=128 , )
torch.manual_seed(0 )
UpperCAmelCase__ : Optional[Any] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , hidden_act='gelu' , projection_dim=512 , )
UpperCAmelCase__ : str = CLIPTextModel(UpperCamelCase_ )
UpperCAmelCase__ : Union[str, Any] = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
UpperCAmelCase__ : Optional[int] = {
'unet': unet,
'scheduler': scheduler,
'inverse_scheduler': inverse_scheduler,
'vae': vae,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'safety_checker': None,
'feature_extractor': None,
}
return components
def __snake_case ( self , UpperCamelCase_ , UpperCamelCase_=0 ):
UpperCAmelCase__ : Tuple = floats_tensor((1, 16, 16) , rng=random.Random(UpperCamelCase_ ) ).to(UpperCamelCase_ )
UpperCAmelCase__ : Union[str, Any] = floats_tensor((1, 2, 4, 16, 16) , rng=random.Random(UpperCamelCase_ ) ).to(UpperCamelCase_ )
if str(UpperCamelCase_ ).startswith('mps' ):
UpperCAmelCase__ : Optional[int] = torch.manual_seed(UpperCamelCase_ )
else:
UpperCAmelCase__ : Optional[int] = torch.Generator(device=UpperCamelCase_ ).manual_seed(UpperCamelCase_ )
UpperCAmelCase__ : Optional[int] = {
'prompt': 'a dog and a newt',
'mask_image': mask,
'image_latents': latents,
'generator': generator,
'num_inference_steps': 2,
'inpaint_strength': 1.0,
'guidance_scale': 6.0,
'output_type': 'numpy',
}
return inputs
def __snake_case ( self , UpperCamelCase_ , UpperCamelCase_=0 ):
UpperCAmelCase__ : str = floats_tensor((1, 3, 32, 32) , rng=random.Random(UpperCamelCase_ ) ).to(UpperCamelCase_ )
UpperCAmelCase__ : str = image.cpu().permute(0 , 2 , 3 , 1 )[0]
        UpperCAmelCase__ : List[Any] = Image.fromarray(np.uint8(UpperCamelCase_ ) ).convert('RGB' )
if str(UpperCamelCase_ ).startswith('mps' ):
UpperCAmelCase__ : Optional[int] = torch.manual_seed(UpperCamelCase_ )
else:
UpperCAmelCase__ : Optional[Any] = torch.Generator(device=UpperCamelCase_ ).manual_seed(UpperCamelCase_ )
UpperCAmelCase__ : Dict = {
'image': image,
'source_prompt': 'a cat and a frog',
'target_prompt': 'a dog and a newt',
'generator': generator,
'num_inference_steps': 2,
'num_maps_per_mask': 2,
'mask_encode_strength': 1.0,
'guidance_scale': 6.0,
'output_type': 'numpy',
}
return inputs
def __snake_case ( self , UpperCamelCase_ , UpperCamelCase_=0 ):
UpperCAmelCase__ : int = floats_tensor((1, 3, 32, 32) , rng=random.Random(UpperCamelCase_ ) ).to(UpperCamelCase_ )
UpperCAmelCase__ : Optional[Any] = image.cpu().permute(0 , 2 , 3 , 1 )[0]
        UpperCAmelCase__ : Optional[int] = Image.fromarray(np.uint8(UpperCamelCase_ ) ).convert('RGB' )
if str(UpperCamelCase_ ).startswith('mps' ):
UpperCAmelCase__ : Tuple = torch.manual_seed(UpperCamelCase_ )
else:
UpperCAmelCase__ : Tuple = torch.Generator(device=UpperCamelCase_ ).manual_seed(UpperCamelCase_ )
UpperCAmelCase__ : str = {
'image': image,
'prompt': 'a cat and a frog',
'generator': generator,
'num_inference_steps': 2,
'inpaint_strength': 1.0,
'guidance_scale': 6.0,
'decode_latents': True,
'output_type': 'numpy',
}
return inputs
def __snake_case ( self ):
if not hasattr(self.pipeline_class , '_optional_components' ):
return
UpperCAmelCase__ : int = self.get_dummy_components()
UpperCAmelCase__ : List[Any] = self.pipeline_class(**UpperCamelCase_ )
pipe.to(UpperCamelCase_ )
pipe.set_progress_bar_config(disable=UpperCamelCase_ )
# set all optional components to None and update pipeline config accordingly
for optional_component in pipe._optional_components:
setattr(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
pipe.register_modules(**{optional_component: None for optional_component in pipe._optional_components} )
UpperCAmelCase__ : Dict = self.get_dummy_inputs(UpperCamelCase_ )
UpperCAmelCase__ : Optional[Any] = pipe(**UpperCamelCase_ )[0]
with tempfile.TemporaryDirectory() as tmpdir:
pipe.save_pretrained(UpperCamelCase_ )
UpperCAmelCase__ : str = self.pipeline_class.from_pretrained(UpperCamelCase_ )
pipe_loaded.to(UpperCamelCase_ )
pipe_loaded.set_progress_bar_config(disable=UpperCamelCase_ )
for optional_component in pipe._optional_components:
self.assertTrue(
getattr(UpperCamelCase_ , UpperCamelCase_ ) is None , F'''`{optional_component}` did not stay set to None after loading.''' , )
UpperCAmelCase__ : Optional[int] = self.get_dummy_inputs(UpperCamelCase_ )
UpperCAmelCase__ : Optional[Any] = pipe_loaded(**UpperCamelCase_ )[0]
UpperCAmelCase__ : Tuple = np.abs(output - output_loaded ).max()
self.assertLess(UpperCamelCase_ , 1E-4 )
def __snake_case ( self ):
UpperCAmelCase__ : List[Any] = 'cpu'
UpperCAmelCase__ : Optional[Any] = self.get_dummy_components()
UpperCAmelCase__ : List[str] = self.pipeline_class(**UpperCamelCase_ )
pipe.to(UpperCamelCase_ )
pipe.set_progress_bar_config(disable=UpperCamelCase_ )
UpperCAmelCase__ : List[Any] = self.get_dummy_mask_inputs(UpperCamelCase_ )
UpperCAmelCase__ : Union[str, Any] = pipe.generate_mask(**UpperCamelCase_ )
UpperCAmelCase__ : Any = mask[0, -3:, -3:]
self.assertEqual(mask.shape , (1, 16, 16) )
UpperCAmelCase__ : str = np.array([0] * 9 )
UpperCAmelCase__ : Dict = np.abs(mask_slice.flatten() - expected_slice ).max()
self.assertLessEqual(UpperCamelCase_ , 1E-3 )
self.assertEqual(mask[0, -3, -4] , 0 )
def __snake_case ( self ):
UpperCAmelCase__ : Tuple = 'cpu'
UpperCAmelCase__ : List[str] = self.get_dummy_components()
UpperCAmelCase__ : Optional[int] = self.pipeline_class(**UpperCamelCase_ )
pipe.to(UpperCamelCase_ )
pipe.set_progress_bar_config(disable=UpperCamelCase_ )
UpperCAmelCase__ : List[Any] = self.get_dummy_inversion_inputs(UpperCamelCase_ )
UpperCAmelCase__ : Union[str, Any] = pipe.invert(**UpperCamelCase_ ).images
UpperCAmelCase__ : List[str] = image[0, -1, -3:, -3:]
self.assertEqual(image.shape , (2, 32, 32, 3) )
UpperCAmelCase__ : Optional[int] = np.array(
[0.5150, 0.5134, 0.5043, 0.5376, 0.4694, 0.51050, 0.5015, 0.4407, 0.4799] , )
UpperCAmelCase__ : Any = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(UpperCamelCase_ , 1E-3 )
def __snake_case ( self ):
super().test_inference_batch_single_identical(expected_max_diff=5E-3 )
def __snake_case ( self ):
UpperCAmelCase__ : Optional[Any] = 'cpu'
UpperCAmelCase__ : int = self.get_dummy_components()
UpperCAmelCase__ : List[Any] = {'beta_start': 0.00085, 'beta_end': 0.012, 'beta_schedule': 'scaled_linear'}
UpperCAmelCase__ : List[str] = DPMSolverMultistepScheduler(**UpperCamelCase_ )
UpperCAmelCase__ : Tuple = DPMSolverMultistepInverseScheduler(**UpperCamelCase_ )
UpperCAmelCase__ : List[Any] = self.pipeline_class(**UpperCamelCase_ )
pipe.to(UpperCamelCase_ )
pipe.set_progress_bar_config(disable=UpperCamelCase_ )
UpperCAmelCase__ : Optional[Any] = self.get_dummy_inversion_inputs(UpperCamelCase_ )
UpperCAmelCase__ : Tuple = pipe.invert(**UpperCamelCase_ ).images
UpperCAmelCase__ : Optional[Any] = image[0, -1, -3:, -3:]
self.assertEqual(image.shape , (2, 32, 32, 3) )
UpperCAmelCase__ : Dict = np.array(
[0.5150, 0.5134, 0.5043, 0.5376, 0.4694, 0.51050, 0.5015, 0.4407, 0.4799] , )
UpperCAmelCase__ : str = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(UpperCamelCase_ , 1E-3 )
@require_torch_gpu
@slow
class a ( unittest.TestCase ):
def __snake_case ( self ):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@classmethod
def __snake_case ( cls ):
UpperCAmelCase__ : Dict = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/diffedit/fruit.png' )
UpperCAmelCase__ : List[Any] = raw_image.convert('RGB' ).resize((768, 768) )
UpperCAmelCase__ : List[Any] = raw_image
def __snake_case ( self ):
UpperCAmelCase__ : int = torch.manual_seed(0 )
UpperCAmelCase__ : List[str] = StableDiffusionDiffEditPipeline.from_pretrained(
            'stabilityai/stable-diffusion-2-1' , safety_checker=None , torch_dtype=torch.float16 )
UpperCAmelCase__ : Tuple = DDIMScheduler.from_config(pipe.scheduler.config )
UpperCAmelCase__ : Tuple = DDIMInverseScheduler.from_config(pipe.scheduler.config )
pipe.enable_model_cpu_offload()
pipe.set_progress_bar_config(disable=UpperCamelCase_ )
UpperCAmelCase__ : List[str] = 'a bowl of fruit'
UpperCAmelCase__ : Optional[Any] = 'a bowl of pears'
UpperCAmelCase__ : Tuple = pipe.generate_mask(
image=self.raw_image , source_prompt=UpperCamelCase_ , target_prompt=UpperCamelCase_ , generator=UpperCamelCase_ , )
UpperCAmelCase__ : str = pipe.invert(
prompt=UpperCamelCase_ , image=self.raw_image , inpaint_strength=0.7 , generator=UpperCamelCase_ ).latents
UpperCAmelCase__ : Any = pipe(
prompt=UpperCamelCase_ , mask_image=UpperCamelCase_ , image_latents=UpperCamelCase_ , generator=UpperCamelCase_ , negative_prompt=UpperCamelCase_ , inpaint_strength=0.7 , output_type='numpy' , ).images[0]
UpperCAmelCase__ : List[str] = (
np.array(
load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/diffedit/pears.png' ).resize((768, 768) ) )
/ 255
)
assert np.abs((expected_image - image).max() ) < 5E-1
def __snake_case ( self ):
UpperCAmelCase__ : str = torch.manual_seed(0 )
UpperCAmelCase__ : Any = StableDiffusionDiffEditPipeline.from_pretrained(
            'stabilityai/stable-diffusion-2-1' , safety_checker=None , torch_dtype=torch.float16 )
UpperCAmelCase__ : List[Any] = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
UpperCAmelCase__ : List[Any] = DPMSolverMultistepInverseScheduler.from_config(pipe.scheduler.config )
pipe.enable_model_cpu_offload()
pipe.set_progress_bar_config(disable=UpperCamelCase_ )
UpperCAmelCase__ : List[str] = 'a bowl of fruit'
UpperCAmelCase__ : Tuple = 'a bowl of pears'
UpperCAmelCase__ : List[Any] = pipe.generate_mask(
image=self.raw_image , source_prompt=UpperCamelCase_ , target_prompt=UpperCamelCase_ , generator=UpperCamelCase_ , )
UpperCAmelCase__ : Tuple = pipe.invert(
prompt=UpperCamelCase_ , image=self.raw_image , inpaint_strength=0.7 , generator=UpperCamelCase_ , num_inference_steps=25 , ).latents
UpperCAmelCase__ : Any = pipe(
prompt=UpperCamelCase_ , mask_image=UpperCamelCase_ , image_latents=UpperCamelCase_ , generator=UpperCamelCase_ , negative_prompt=UpperCamelCase_ , inpaint_strength=0.7 , num_inference_steps=25 , output_type='numpy' , ).images[0]
UpperCAmelCase__ : Union[str, Any] = (
np.array(
load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/diffedit/pears.png' ).resize((768, 768) ) )
/ 255
)
assert np.abs((expected_image - image).max() ) < 5E-1
| 110
|
'''simple docstring'''
demo_graph = {
'A': ['B', 'C', 'E'],
'B': ['A', 'D', 'E'],
'C': ['A', 'F', 'G'],
'D': ['B'],
'E': ['A', 'B', 'D'],
'F': ['C'],
'G': ['C'],
}
def bfs_shortest_path(graph: dict, start: str, goal: str) -> list:
    explored = set()
    # keep track of all the paths to be checked
    queue = [[start]]
    # return path if start is goal
    if start == goal:
        return [start]
    # keeps looping until all possible paths have been checked
    while queue:
        # pop the first path from the queue
        path = queue.pop(0)
        # get the last node from the path
        node = path[-1]
        if node not in explored:
            neighbours = graph[node]
            # go through all neighbour nodes, construct a new path and
            # push it into the queue
            for neighbour in neighbours:
                new_path = list(path)
                new_path.append(neighbour)
                queue.append(new_path)
                # return path if neighbour is goal
                if neighbour == goal:
                    return new_path
            # mark node as explored
            explored.add(node)
    # in case there's no path between the 2 nodes
    return []
def bfs_shortest_path_distance(graph: dict, start: str, target: str) -> int:
    if not graph or start not in graph or target not in graph:
        return -1
    if start == target:
        return 0
    queue = [start]
    visited = set(start)
    # Keep tab on distances from `start` node.
    dist = {start: 0, target: -1}
    while queue:
        node = queue.pop(0)
        if node == target:
            dist[target] = (
                dist[node] if dist[target] == -1 else min(dist[target], dist[node])
            )
        for adjacent in graph[node]:
            if adjacent not in visited:
                visited.add(adjacent)
                queue.append(adjacent)
                dist[adjacent] = dist[node] + 1
    return dist[target]
if __name__ == "__main__":
print(bfs_shortest_path(demo_graph, 'G', 'D')) # returns ['G', 'C', 'A', 'B', 'D']
print(bfs_shortest_path_distance(demo_graph, 'G', 'D')) # returns 4
| 3
| 0
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
snake_case__ : List[Any] = logging.get_logger(__name__)
snake_case__ : Union[str, Any] = {
'funnel-transformer/small': 'https://huggingface.co/funnel-transformer/small/resolve/main/config.json',
'funnel-transformer/small-base': 'https://huggingface.co/funnel-transformer/small-base/resolve/main/config.json',
'funnel-transformer/medium': 'https://huggingface.co/funnel-transformer/medium/resolve/main/config.json',
'funnel-transformer/medium-base': 'https://huggingface.co/funnel-transformer/medium-base/resolve/main/config.json',
'funnel-transformer/intermediate': (
'https://huggingface.co/funnel-transformer/intermediate/resolve/main/config.json'
),
'funnel-transformer/intermediate-base': (
'https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/config.json'
),
'funnel-transformer/large': 'https://huggingface.co/funnel-transformer/large/resolve/main/config.json',
'funnel-transformer/large-base': 'https://huggingface.co/funnel-transformer/large-base/resolve/main/config.json',
'funnel-transformer/xlarge': 'https://huggingface.co/funnel-transformer/xlarge/resolve/main/config.json',
'funnel-transformer/xlarge-base': 'https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/config.json',
}
class FunnelConfig(PretrainedConfig):
    '''simple docstring'''
    model_type = "funnel"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "n_head",
    }
    def __init__( self , vocab_size=30_522 , block_sizes=[4, 4, 4] , block_repeats=None , num_decoder_layers=2 , d_model=768 , n_head=12 , d_head=64 , d_inner=3_072 , hidden_act="gelu_new" , hidden_dropout=0.1 , attention_dropout=0.1 , activation_dropout=0.0 , initializer_range=0.1 , initializer_std=None , layer_norm_eps=1e-9 , pooling_type="mean" , attention_type="relative_shift" , separate_cls=True , truncate_seq=True , pool_q_only=True , **kwargs , ):
        self.vocab_size = vocab_size
        self.block_sizes = block_sizes
        self.block_repeats = [1] * len(block_sizes ) if block_repeats is None else block_repeats
        assert len(block_sizes ) == len(
            self.block_repeats ), "`block_sizes` and `block_repeats` should have the same length."
        self.num_decoder_layers = num_decoder_layers
        self.d_model = d_model
        self.n_head = n_head
        self.d_head = d_head
        self.d_inner = d_inner
        self.hidden_act = hidden_act
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.initializer_range = initializer_range
        self.initializer_std = initializer_std
        self.layer_norm_eps = layer_norm_eps
        assert pooling_type in [
            "mean",
            "max",
        ], f"""Got {pooling_type} for `pooling_type` but only 'mean' and 'max' are supported."""
        self.pooling_type = pooling_type
        assert attention_type in [
            "relative_shift",
            "factorized",
        ], f"""Got {attention_type} for `attention_type` but only 'relative_shift' and 'factorized' are supported."""
        self.attention_type = attention_type
        self.separate_cls = separate_cls
        self.truncate_seq = truncate_seq
        self.pool_q_only = pool_q_only
        super().__init__(**kwargs )
    @property
    def num_hidden_layers( self ):
        return sum(self.block_sizes )
    @num_hidden_layers.setter
    def num_hidden_layers( self , value ):
        raise NotImplementedError(
            """This model does not support the setting of `num_hidden_layers`. Please set `block_sizes`.""" )
    @property
    def num_blocks( self ):
        return len(self.block_sizes )
    @num_blocks.setter
    def num_blocks( self , value ):
        raise NotImplementedError("""This model does not support the setting of `num_blocks`. Please set `block_sizes`.""" )
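# Minimal usage sketch, not part of the original file, assuming this class behaves like
# transformers' FunnelConfig: the layer count is derived from `block_sizes`, so the
# `num_hidden_layers` property above is read-only.
#
#     config = FunnelConfig(block_sizes=[2, 2], d_model=256, n_head=4)
#     assert config.num_blocks == 2
#     assert config.num_hidden_layers == sum(config.block_sizes)  # 4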
| 705
|
# Copyright (c) 2021-, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
####################################################################################################
#
# Note: If when running this conversion script you're getting an exception:
# ModuleNotFoundError: No module named 'megatron.model.enums'
# you need to tell python where to find the clone of Megatron-LM, e.g.:
#
# cd /tmp
# git clone https://github.com/NVIDIA/Megatron-LM
# PYTHONPATH=/tmp/Megatron-LM python src/transformers/models/megatron_gpt2/convert_megatron_gpt2_checkpoint.py ...
#
# if you already have it cloned elsewhere, simply adjust the path to the existing path
#
# If the training was done using a Megatron-LM fork, e.g.,
# https://github.com/microsoft/Megatron-DeepSpeed/ then chances are that you need to have that one
# in your path, i.e., /path/to/Megatron-DeepSpeed/
#
import argparse
import os
import re
import zipfile
import torch
from transformers import AutoTokenizer, GPT2Config
def recursive_print(name, val, spaces=0):
    # Format the message.
    if name is None:
        msg = None
    else:
        fmt = "." * max(0, spaces - 2) + "# {:" + str(50 - spaces) + "s}"
        msg = fmt.format(name)
    # Print and recurse (if needed).
    if isinstance(val, dict):
        if msg is not None:
            print(msg)
        for k in val.keys():
            recursive_print(k, val[k], spaces + 2)
    elif isinstance(val, torch.Tensor):
        print(msg, ":", val.size())
    else:
        print(msg, ":", val)
def fix_query_key_value_ordering(param, checkpoint_version, num_splits, num_heads, hidden_size):
    # Permutes layout of param tensor to [num_splits * num_heads * hidden_size, :]
    # for compatibility with later versions of NVIDIA Megatron-LM.
    # The inverse operation is performed inside Megatron-LM to read checkpoints:
    # https://github.com/NVIDIA/Megatron-LM/blob/v2.4/megatron/checkpointing.py#L209
    # If param is the weight tensor of the self-attention block, the returned tensor
    # will have to be transposed one more time to be read by HuggingFace GPT2.
    input_shape = param.size()
    if checkpoint_version == 1.0:
        # version 1.0 stores [num_heads * hidden_size * num_splits, :]
        saved_shape = (num_heads, hidden_size, num_splits) + input_shape[1:]
        param = param.view(*saved_shape)
        param = param.transpose(0, 2)
        param = param.transpose(1, 2).contiguous()
    elif checkpoint_version >= 2.0:
        # other versions store [num_heads * num_splits * hidden_size, :]
        saved_shape = (num_heads, num_splits, hidden_size) + input_shape[1:]
        param = param.view(*saved_shape)
        param = param.transpose(0, 1).contiguous()
    param = param.view(*input_shape)
    return param
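# Illustrative shape check, not part of the original script. For a hypothetical tiny layer with
# num_heads=2, hidden_size=4 per head and the fused QKV weight stored as [3 * 2 * 4, 8], the
# checkpoint_version >= 2.0 branch only reorders elements, so the overall shape is preserved:
#
#     qkv = torch.arange(24 * 8, dtype=torch.float32).view(24, 8)
#     fixed = fix_query_key_value_ordering(qkv, checkpoint_version=2.0, num_splits=3, num_heads=2, hidden_size=4)
#     assert fixed.shape == qkv.shape == (24, 8)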
def __lowerCamelCase ( A__ : Union[str, Any] , A__ : Tuple , A__ : str ) -> Dict:
# The converted output model.
lowerCamelCase_ : Tuple = {}
# old versions did not store training args
lowerCamelCase_ : Any = input_state_dict.get("""args""" , A__ )
if ds_args is not None:
# do not make the user write a config file when the exact dimensions/sizes are already in the checkpoint
# from pprint import pprint
# pprint(vars(ds_args))
lowerCamelCase_ : Union[str, Any] = ds_args.padded_vocab_size
lowerCamelCase_ : List[str] = ds_args.max_position_embeddings
lowerCamelCase_ : Tuple = ds_args.hidden_size
lowerCamelCase_ : List[Any] = ds_args.num_layers
lowerCamelCase_ : Optional[int] = ds_args.num_attention_heads
lowerCamelCase_ : int = ds_args.ffn_hidden_size
# pprint(config)
# The number of heads.
lowerCamelCase_ : List[Any] = config.n_head
# The hidden_size per head.
lowerCamelCase_ : str = config.n_embd // config.n_head
# Megatron-LM checkpoint version
if "checkpoint_version" in input_state_dict.keys():
lowerCamelCase_ : Tuple = input_state_dict["""checkpoint_version"""]
else:
lowerCamelCase_ : Union[str, Any] = 0.0
# The model.
lowerCamelCase_ : Optional[Any] = input_state_dict["""model"""]
# The language model.
lowerCamelCase_ : str = model["""language_model"""]
# The embeddings.
lowerCamelCase_ : Any = lm["""embedding"""]
# The word embeddings.
lowerCamelCase_ : Dict = embeddings["""word_embeddings"""]["""weight"""]
# Truncate the embedding table to vocab_size rows.
lowerCamelCase_ : Tuple = word_embeddings[: config.vocab_size, :]
lowerCamelCase_ : str = word_embeddings
# The position embeddings.
lowerCamelCase_ : Any = embeddings["""position_embeddings"""]["""weight"""]
# Read the causal mask dimension (seqlen). [max_sequence_length, hidden_size]
lowerCamelCase_ : Tuple = pos_embeddings.size(0 )
if n_positions != config.n_positions:
raise ValueError(
f'''pos_embeddings.max_sequence_length={n_positions} and config.n_positions={config.n_positions} don\'t match''' )
# Store the position embeddings.
lowerCamelCase_ : List[str] = pos_embeddings
# The transformer.
lowerCamelCase_ : List[str] = lm["""transformer"""] if """transformer""" in lm.keys() else lm["""encoder"""]
# The regex to extract layer names.
lowerCamelCase_ : Any = re.compile(R"""layers\.(\d+)\.([a-z0-9_.]+)\.([a-z]+)""" )
# The simple map of names for "automated" rules.
lowerCamelCase_ : int = {
"""attention.dense""": """.attn.c_proj.""",
"""self_attention.dense""": """.attn.c_proj.""",
"""mlp.dense_h_to_4h""": """.mlp.c_fc.""",
"""mlp.dense_4h_to_h""": """.mlp.c_proj.""",
}
# Extract the layers.
for key, val in transformer.items():
# Match the name.
lowerCamelCase_ : Any = layer_re.match(A__ )
# Stop if that's not a layer
if m is None:
break
# The index of the layer.
lowerCamelCase_ : Dict = int(m.group(1 ) )
# The name of the operation.
lowerCamelCase_ : List[str] = m.group(2 )
# Is it a weight or a bias?
lowerCamelCase_ : Any = m.group(3 )
# The name of the layer.
lowerCamelCase_ : List[str] = f'''transformer.h.{layer_idx}'''
# For layernorm(s), simply store the layer norm.
if op_name.endswith("""layernorm""" ):
lowerCamelCase_ : Any = """ln_1""" if op_name.startswith("""input""" ) else """ln_2"""
lowerCamelCase_ : Union[str, Any] = val
# Transpose the QKV matrix.
elif (
op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value"
) and weight_or_bias == "weight":
# Insert a tensor of 1x1xDxD bias.
            causal_mask = torch.tril(torch.ones((n_positions, n_positions) , dtype=torch.float16 ) ).view(
                1 , 1 , n_positions , n_positions )
lowerCamelCase_ : int = causal_mask
# Insert a "dummy" tensor for masked_bias.
            masked_bias = torch.tensor(-1e4 , dtype=torch.float16 )
lowerCamelCase_ : Tuple = masked_bias
lowerCamelCase_ : Optional[Any] = fix_query_key_value_ordering(A__ , A__ , 3 , A__ , A__ )
# Megatron stores (3*D) x D but transformers-GPT2 expects D x 3*D.
lowerCamelCase_ : Optional[Any] = out_val.transpose(0 , 1 ).contiguous()
# Store.
lowerCamelCase_ : List[Any] = out_val
# Transpose the bias.
elif (
op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value"
) and weight_or_bias == "bias":
lowerCamelCase_ : List[Any] = fix_query_key_value_ordering(A__ , A__ , 3 , A__ , A__ )
# Store. No change of shape.
lowerCamelCase_ : List[str] = out_val
# Transpose the weights.
elif weight_or_bias == "weight":
lowerCamelCase_ : Union[str, Any] = megatron_to_transformers[op_name]
lowerCamelCase_ : Any = val.transpose(0 , 1 )
# Copy the bias.
elif weight_or_bias == "bias":
lowerCamelCase_ : List[str] = megatron_to_transformers[op_name]
lowerCamelCase_ : str = val
# DEBUG.
assert config.n_layer == layer_idx + 1
# The final layernorm.
lowerCamelCase_ : List[str] = transformer["""final_layernorm.weight"""]
lowerCamelCase_ : Union[str, Any] = transformer["""final_layernorm.bias"""]
# For LM head, transformers' wants the matrix to weight embeddings.
lowerCamelCase_ : int = word_embeddings
# It should be done!
return output_state_dict
def __lowerCamelCase ( ) -> Optional[Any]:
# Create the argument parser.
lowerCamelCase_ : List[str] = argparse.ArgumentParser()
parser.add_argument("""--print-checkpoint-structure""" , action="""store_true""" )
parser.add_argument(
"""path_to_checkpoint""" , type=A__ , help="""Path to the checkpoint file (.zip archive or direct .pt file)""" , )
parser.add_argument(
"""--config_file""" , default="""""" , type=A__ , help="""An optional config json file describing the pre-trained model.""" , )
lowerCamelCase_ : Optional[int] = parser.parse_args()
# Extract the basename.
lowerCamelCase_ : List[Any] = os.path.dirname(args.path_to_checkpoint )
# Load the model.
# the .zip is very optional, let's keep it for backward compatibility
print(f'''Extracting PyTorch state dictionary from {args.path_to_checkpoint}''' )
if args.path_to_checkpoint.endswith(""".zip""" ):
with zipfile.ZipFile(args.path_to_checkpoint , """r""" ) as checkpoint:
with checkpoint.open("""release/mp_rank_00/model_optim_rng.pt""" ) as pytorch_dict:
lowerCamelCase_ : str = torch.load(A__ , map_location="""cpu""" )
else:
lowerCamelCase_ : Optional[int] = torch.load(args.path_to_checkpoint , map_location="""cpu""" )
lowerCamelCase_ : Optional[int] = input_state_dict.get("""args""" , A__ )
# Read the config, or default to the model released by NVIDIA.
if args.config_file == "":
if ds_args is not None:
if ds_args.bias_gelu_fusion:
lowerCamelCase_ : int = """gelu_fast"""
elif ds_args.openai_gelu:
lowerCamelCase_ : Optional[Any] = """gelu_new"""
else:
lowerCamelCase_ : Union[str, Any] = """gelu"""
else:
# in the very early days this used to be "gelu_new"
lowerCamelCase_ : List[Any] = """gelu_new"""
# Spell out all parameters in case the defaults change.
        config = GPT2Config(
vocab_size=5_0257 , n_positions=1024 , n_embd=1024 , n_layer=24 , n_head=16 , n_inner=4096 , activation_function=A__ , resid_pdrop=0.1 , embd_pdrop=0.1 , attn_pdrop=0.1 , layer_norm_epsilon=1e-5 , initializer_range=0.02 , summary_type="""cls_index""" , summary_use_proj=A__ , summary_activation=A__ , summary_proj_to_labels=A__ , summary_first_dropout=0.1 , scale_attn_weights=A__ , use_cache=A__ , bos_token_id=5_0256 , eos_token_id=5_0256 , )
else:
        config = GPT2Config.from_json_file(args.config_file )
    config.architectures = ["""GPT2LMHeadModel"""]
# Convert.
print("""Converting""" )
lowerCamelCase_ : Dict = convert_megatron_checkpoint(A__ , A__ , A__ )
# Print the structure of converted state dict.
if args.print_checkpoint_structure:
recursive_print(A__ , A__ )
# Add tokenizer class info to config
# see https://github.com/huggingface/transformers/issues/13906)
if ds_args is not None:
lowerCamelCase_ : int = ds_args.tokenizer_type
if tokenizer_type == "GPT2BPETokenizer":
lowerCamelCase_ : List[Any] = """gpt2"""
elif tokenizer_type == "PretrainedFromHF":
lowerCamelCase_ : Tuple = ds_args.tokenizer_name_or_path
else:
raise ValueError(f'''Unrecognized tokenizer_type {tokenizer_type}''' )
else:
lowerCamelCase_ : Any = """gpt2"""
lowerCamelCase_ : List[str] = AutoTokenizer.from_pretrained(A__ )
lowerCamelCase_ : str = type(A__ ).__name__
lowerCamelCase_ : List[str] = tokenizer_class
# Store the config to file.
print("""Saving config""" )
config.save_pretrained(A__ )
# Save tokenizer based on args
print(f'''Adding {tokenizer_class} tokenizer files''' )
tokenizer.save_pretrained(A__ )
# Store the state_dict to file.
lowerCamelCase_ : Union[str, Any] = os.path.join(A__ , """pytorch_model.bin""" )
print(f'''Saving checkpoint to "{output_checkpoint_file}"''' )
torch.save(A__ , A__ )
####################################################################################################
if __name__ == "__main__":
main()
####################################################################################################
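# Example invocation, not part of the original script; paths are hypothetical and assume the
# checkpoint layout described in the header comments:
#
#     PYTHONPATH=/tmp/Megatron-LM python convert_megatron_gpt2_checkpoint.py \
#         --print-checkpoint-structure /path/to/release/mp_rank_00/model_optim_rng.pt
#
# The converted config.json, tokenizer files and pytorch_model.bin are written to the directory
# containing the checkpoint.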
| 171
| 0
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase : Any = logging.get_logger(__name__)
lowerCAmelCase : str = {
'uw-madison/mra-base-512-4': 'https://huggingface.co/uw-madison/mra-base-512-4/resolve/main/config.json',
}
class MraConfig(PretrainedConfig):
    model_type = """mra"""
    def __init__( self , vocab_size=50265 , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3072 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=1 , initializer_range=0.02 , layer_norm_eps=1e-5 , position_embedding_type="absolute" , block_per_row=4 , approx_mode="full" , initial_prior_first_n_blocks=0 , initial_prior_diagonal_n_blocks=0 , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , **kwargs , ):
        '''simple docstring'''
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.block_per_row = block_per_row
        self.approx_mode = approx_mode
        self.initial_prior_first_n_blocks = initial_prior_first_n_blocks
        self.initial_prior_diagonal_n_blocks = initial_prior_diagonal_n_blocks
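# Minimal usage sketch, not part of the original file, assuming this class mirrors transformers'
# MraConfig: instantiate with defaults and override a few fields.
#
#     config = MraConfig(hidden_size=256, num_hidden_layers=4, block_per_row=2)
#     print(config.approx_mode)  # "full"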
| 3
|
'''simple docstring'''
import sys
import webbrowser
import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent
if __name__ == "__main__":
print('Googling.....')
lowerCAmelCase : List[Any] = 'https://www.google.com/search?q=' + ' '.join(sys.argv[1:])
lowerCAmelCase : List[Any] = requests.get(url, headers={'UserAgent': UserAgent().random})
# res.raise_for_status()
with open('project1a.html', 'wb') as out_file: # only for knowing the class
for data in res.iter_content(1_00_00):
out_file.write(data)
lowerCAmelCase : Tuple = BeautifulSoup(res.text, 'html.parser')
lowerCAmelCase : List[Any] = list(soup.select('.eZt8xd'))[:5]
print(len(links))
for link in links:
if link.text == "Maps":
webbrowser.open(link.get('href'))
else:
webbrowser.open(f"""https://google.com{link.get('href')}""")
| 3
| 1
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
a_ : List[str] = {'''configuration_swin''': ['''SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''SwinConfig''', '''SwinOnnxConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ : Union[str, Any] = [
'''SWIN_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''SwinForImageClassification''',
'''SwinForMaskedImageModeling''',
'''SwinModel''',
'''SwinPreTrainedModel''',
'''SwinBackbone''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ : Any = [
'''TF_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFSwinForImageClassification''',
'''TFSwinForMaskedImageModeling''',
'''TFSwinModel''',
'''TFSwinPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_swin import SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP, SwinConfig, SwinOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_swin import (
SWIN_PRETRAINED_MODEL_ARCHIVE_LIST,
SwinBackbone,
SwinForImageClassification,
SwinForMaskedImageModeling,
SwinModel,
SwinPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_swin import (
TF_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST,
TFSwinForImageClassification,
TFSwinForMaskedImageModeling,
TFSwinModel,
TFSwinPreTrainedModel,
)
else:
import sys
a_ : int = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 263
|
"""simple docstring"""
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
class Speech2TextProcessor(ProcessorMixin):
    '''simple docstring'''
    feature_extractor_class = 'Speech2TextFeatureExtractor'
    tokenizer_class = 'Speech2TextTokenizer'
    def __init__( self , feature_extractor , tokenizer ):
        super().__init__(feature_extractor , tokenizer )
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
    def __call__( self , *args , **kwargs ):
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor(*args , **kwargs )
        if "raw_speech" in kwargs:
            warnings.warn('Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.' )
            audio = kwargs.pop('raw_speech' )
        else:
            audio = kwargs.pop('audio' , None )
        sampling_rate = kwargs.pop('sampling_rate' , None )
        text = kwargs.pop('text' , None )
        if len(args ) > 0:
            audio = args[0]
            args = args[1:]
        if audio is None and text is None:
            raise ValueError('You need to specify either an `audio` or `text` input to process.' )
        if audio is not None:
            inputs = self.feature_extractor(audio , *args , sampling_rate=sampling_rate , **kwargs )
        if text is not None:
            encodings = self.tokenizer(text , **kwargs )
        if text is None:
            return inputs
        elif audio is None:
            return encodings
        else:
            inputs['labels'] = encodings['input_ids']
            return inputs
    def batch_decode( self , *args , **kwargs ):
        return self.tokenizer.batch_decode(*args , **kwargs )
    def decode( self , *args , **kwargs ):
        return self.tokenizer.decode(*args , **kwargs )
    @contextmanager
    def as_target_processor( self ):
        warnings.warn(
            '`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your '
            'labels by using the argument `text` of the regular `__call__` method (either in the same call as '
            'your audio inputs, or in a separate call.' )
        self._in_target_context_manager = True
        self.current_processor = self.tokenizer
        yield
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
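# Minimal usage sketch, not part of the original file; the checkpoint id is illustrative and
# `waveform` / the sampling rate are assumed to come from the caller:
#
#     processor = Speech2TextProcessor.from_pretrained("facebook/s2t-small-librispeech-asr")
#     inputs = processor(audio=waveform, sampling_rate=16_000, return_tensors="pt")
#     labels = processor(text="a transcription of the clip", return_tensors="pt").input_ids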
| 263
| 1
|
from typing import TYPE_CHECKING
from ..utils import _LazyModule
_snake_case = {
'config': [
'EXTERNAL_DATA_FORMAT_SIZE_LIMIT',
'OnnxConfig',
'OnnxConfigWithPast',
'OnnxSeq2SeqConfigWithPast',
'PatchingSpec',
],
'convert': ['export', 'validate_model_outputs'],
'features': ['FeaturesManager'],
'utils': ['ParameterFormat', 'compute_serialized_parameters_size'],
}
if TYPE_CHECKING:
from .config import (
EXTERNAL_DATA_FORMAT_SIZE_LIMIT,
OnnxConfig,
OnnxConfigWithPast,
OnnxSeqaSeqConfigWithPast,
PatchingSpec,
)
from .convert import export, validate_model_outputs
from .features import FeaturesManager
from .utils import ParameterFormat, compute_serialized_parameters_size
else:
import sys
_snake_case = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 382
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
A : int = {'configuration_opt': ['OPT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'OPTConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A : int = [
'OPT_PRETRAINED_MODEL_ARCHIVE_LIST',
'OPTForCausalLM',
'OPTModel',
'OPTPreTrainedModel',
'OPTForSequenceClassification',
'OPTForQuestionAnswering',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A : Optional[Any] = ['TFOPTForCausalLM', 'TFOPTModel', 'TFOPTPreTrainedModel']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A : Optional[int] = [
'FlaxOPTForCausalLM',
'FlaxOPTModel',
'FlaxOPTPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_opt import OPT_PRETRAINED_CONFIG_ARCHIVE_MAP, OPTConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_opt import (
OPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OPTForCausalLM,
OPTForQuestionAnswering,
OPTForSequenceClassification,
OPTModel,
OPTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_opt import TFOPTForCausalLM, TFOPTModel, TFOPTPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_opt import FlaxOPTForCausalLM, FlaxOPTModel, FlaxOPTPreTrainedModel
else:
import sys
A : int = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 219
| 0
|
# coding=utf-8
# Copyright 2023 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# this script dumps information about the environment
import os
import platform
import sys
os.environ["""TF_CPP_MIN_LOG_LEVEL"""] = """3"""  # reduce the amount of console output from TF
print("""Python version:""", sys.version)
print("""OS platform:""", platform.platform())
print("""OS architecture:""", platform.machine())
try:
import torch
print("""Torch version:""", torch.__version__)
print("""Cuda available:""", torch.cuda.is_available())
print("""Cuda version:""", torch.version.cuda)
print("""CuDNN version:""", torch.backends.cudnn.version())
print("""Number of GPUs available:""", torch.cuda.device_count())
except ImportError:
print("""Torch version:""", None)
try:
import transformers
print("""transformers version:""", transformers.__version__)
except ImportError:
print("""transformers version:""", None)
| 701
|
import itertools
import json
import linecache
import os
import pickle
import re
import socket
import string
from collections import Counter
from logging import getLogger
from pathlib import Path
from typing import Callable, Dict, Iterable, List
import git
import torch
from torch.utils.data import Dataset
from transformers import BartTokenizer, RagTokenizer, TaTokenizer
def encode_line(tokenizer, line, max_length, padding_side, pad_to_max_length=True, return_tensors="pt"):
    '''simple docstring'''
    extra_kw = {"""add_prefix_space""": True} if isinstance(tokenizer, BartTokenizer) and not line.startswith(""" """) else {}
    tokenizer.padding_side = padding_side
    return tokenizer(
        [line] , max_length=max_length , padding="""max_length""" if pad_to_max_length else None , truncation=True , return_tensors=return_tensors , add_special_tokens=True , **extra_kw , )
def trim_batch(input_ids, pad_token_id, attention_mask=None, ):
    '''simple docstring'''
    keep_column_mask = input_ids.ne(pad_token_id).any(dim=0)
    if attention_mask is None:
        return input_ids[:, keep_column_mask]
    else:
        return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask])
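# Illustrative behaviour, not part of the original file: columns populated only by the pad id are
# dropped across the whole batch.
#
#     batch = torch.tensor([[5, 6, 0, 0], [7, 0, 0, 0]])
#     trim_batch(batch, pad_token_id=0)  # tensor([[5, 6], [7, 0]])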
class Seq2SeqDataset(Dataset):
'''simple docstring'''
def __init__( self : Any , a : int , a : List[str] , a : Dict , a : List[Any] , a : Optional[Any]="train" , a : Tuple=None , a : List[str]=None , a : Optional[int]=None , a : List[str]="" , ) -> Optional[Any]:
super().__init__()
SCREAMING_SNAKE_CASE = Path(a ).joinpath(type_path + """.source""" )
SCREAMING_SNAKE_CASE = Path(a ).joinpath(type_path + """.target""" )
SCREAMING_SNAKE_CASE = self.get_char_lens(self.src_file )
SCREAMING_SNAKE_CASE = max_source_length
SCREAMING_SNAKE_CASE = max_target_length
assert min(self.src_lens ) > 0, f"""found empty line in {self.src_file}"""
SCREAMING_SNAKE_CASE = tokenizer
SCREAMING_SNAKE_CASE = prefix
if n_obs is not None:
SCREAMING_SNAKE_CASE = self.src_lens[:n_obs]
SCREAMING_SNAKE_CASE = src_lang
SCREAMING_SNAKE_CASE = tgt_lang
def __len__( self : Union[str, Any] ) -> Dict:
return len(self.src_lens )
def __getitem__( self : Any , a : Optional[int] ) -> Dict[str, torch.Tensor]:
SCREAMING_SNAKE_CASE = index + 1 # linecache starts at 1
SCREAMING_SNAKE_CASE = self.prefix + linecache.getline(str(self.src_file ) , a ).rstrip("""\n""" )
SCREAMING_SNAKE_CASE = linecache.getline(str(self.tgt_file ) , a ).rstrip("""\n""" )
assert source_line, f"""empty source line for index {index}"""
assert tgt_line, f"""empty tgt line for index {index}"""
# Need to add eos token manually for T5
if isinstance(self.tokenizer , a ):
source_line += self.tokenizer.eos_token
tgt_line += self.tokenizer.eos_token
# Pad source and target to the right
SCREAMING_SNAKE_CASE = (
self.tokenizer.question_encoder if isinstance(self.tokenizer , a ) else self.tokenizer
)
SCREAMING_SNAKE_CASE = self.tokenizer.generator if isinstance(self.tokenizer , a ) else self.tokenizer
SCREAMING_SNAKE_CASE = encode_line(a , a , self.max_source_length , """right""" )
SCREAMING_SNAKE_CASE = encode_line(a , a , self.max_target_length , """right""" )
SCREAMING_SNAKE_CASE = source_inputs["""input_ids"""].squeeze()
SCREAMING_SNAKE_CASE = target_inputs["""input_ids"""].squeeze()
SCREAMING_SNAKE_CASE = source_inputs["""attention_mask"""].squeeze()
return {
"input_ids": source_ids,
"attention_mask": src_mask,
"decoder_input_ids": target_ids,
}
@staticmethod
def _UpperCAmelCase ( a : Optional[int] ) -> List[Any]:
return [len(a ) for x in Path(a ).open().readlines()]
def _UpperCAmelCase ( self : Optional[int] , a : List[Any] ) -> Dict[str, torch.Tensor]:
SCREAMING_SNAKE_CASE = torch.stack([x["""input_ids"""] for x in batch] )
SCREAMING_SNAKE_CASE = torch.stack([x["""attention_mask"""] for x in batch] )
SCREAMING_SNAKE_CASE = torch.stack([x["""decoder_input_ids"""] for x in batch] )
SCREAMING_SNAKE_CASE = (
self.tokenizer.generator.pad_token_id
if isinstance(self.tokenizer , a )
else self.tokenizer.pad_token_id
)
SCREAMING_SNAKE_CASE = (
self.tokenizer.question_encoder.pad_token_id
if isinstance(self.tokenizer , a )
else self.tokenizer.pad_token_id
)
SCREAMING_SNAKE_CASE = trim_batch(a , a )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = trim_batch(a , a , attention_mask=a )
SCREAMING_SNAKE_CASE = {
"""input_ids""": source_ids,
"""attention_mask""": source_mask,
"""decoder_input_ids""": y,
}
return batch
__A : Tuple = getLogger(__name__)
def lowerCamelCase_ ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
return list(itertools.chain.from_iterable(SCREAMING_SNAKE_CASE ) )
def lowerCamelCase_ ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
SCREAMING_SNAKE_CASE = get_git_info()
save_json(SCREAMING_SNAKE_CASE , os.path.join(SCREAMING_SNAKE_CASE , """git_log.json""" ) )
def lowerCamelCase_ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=4 , **SCREAMING_SNAKE_CASE ):
'''simple docstring'''
with open(SCREAMING_SNAKE_CASE , """w""" ) as f:
json.dump(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , indent=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )
def lowerCamelCase_ ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
with open(SCREAMING_SNAKE_CASE ) as f:
return json.load(SCREAMING_SNAKE_CASE )
def lowerCamelCase_ ( ):
'''simple docstring'''
SCREAMING_SNAKE_CASE = git.Repo(search_parent_directories=SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE = {
"""repo_id""": str(SCREAMING_SNAKE_CASE ),
"""repo_sha""": str(repo.head.object.hexsha ),
"""repo_branch""": str(repo.active_branch ),
"""hostname""": str(socket.gethostname() ),
}
return repo_infos
def lowerCamelCase_ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
'''simple docstring'''
return list(map(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) )
def lowerCamelCase_ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
'''simple docstring'''
with open(SCREAMING_SNAKE_CASE , """wb""" ) as f:
return pickle.dump(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
def lowerCamelCase_ ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def remove_articles(SCREAMING_SNAKE_CASE ):
return re.sub(r"""\b(a|an|the)\b""" , """ """ , SCREAMING_SNAKE_CASE )
def white_space_fix(SCREAMING_SNAKE_CASE ):
return " ".join(text.split() )
def remove_punc(SCREAMING_SNAKE_CASE ):
SCREAMING_SNAKE_CASE = set(string.punctuation )
return "".join(ch for ch in text if ch not in exclude )
def lower(SCREAMING_SNAKE_CASE ):
return text.lower()
return white_space_fix(remove_articles(remove_punc(lower(SCREAMING_SNAKE_CASE ) ) ) )
def lowerCamelCase_ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
'''simple docstring'''
SCREAMING_SNAKE_CASE = normalize_answer(SCREAMING_SNAKE_CASE ).split()
SCREAMING_SNAKE_CASE = normalize_answer(SCREAMING_SNAKE_CASE ).split()
SCREAMING_SNAKE_CASE = Counter(SCREAMING_SNAKE_CASE ) & Counter(SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE = sum(common.values() )
if num_same == 0:
return 0
SCREAMING_SNAKE_CASE = 1.0 * num_same / len(SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE = 1.0 * num_same / len(SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE = (2 * precision * recall) / (precision + recall)
return fa
def lowerCamelCase_ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
'''simple docstring'''
return normalize_answer(SCREAMING_SNAKE_CASE ) == normalize_answer(SCREAMING_SNAKE_CASE )
def lowerCamelCase_ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
'''simple docstring'''
assert len(SCREAMING_SNAKE_CASE ) == len(SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE = 0
for hypo, pred in zip(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
em += exact_match_score(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
if len(SCREAMING_SNAKE_CASE ) > 0:
em /= len(SCREAMING_SNAKE_CASE )
return {"em": em}
def lowerCamelCase_ ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
return model_prefix.startswith("""rag""" )
def lowerCamelCase_ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
'''simple docstring'''
SCREAMING_SNAKE_CASE = {p: p for p in extra_params}
# T5 models don't have `dropout` param, they have `dropout_rate` instead
SCREAMING_SNAKE_CASE = """dropout_rate"""
for p in extra_params:
if getattr(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
if not hasattr(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) and not hasattr(SCREAMING_SNAKE_CASE , equivalent_param[p] ):
logger.info("""config doesn't have a `{}` attribute""".format(SCREAMING_SNAKE_CASE ) )
delattr(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
continue
SCREAMING_SNAKE_CASE = p if hasattr(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) else equivalent_param[p]
setattr(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , getattr(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) )
delattr(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
return hparams, config
| 450
| 0
|
"""simple docstring"""
import numpy as np
from transformers import BatchFeature
from transformers.testing_utils import require_tf, require_torch
from .test_feature_extraction_common import FeatureExtractionSavingTestMixin
class _lowerCAmelCase ( a ):
"""simple docstring"""
__magic_name__ :List[Any] = None
__magic_name__ :Union[str, Any] = None
@property
def snake_case ( self ):
'''simple docstring'''
return self.feat_extract_tester.prepare_feat_extract_dict()
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ :Optional[int] = self.feature_extraction_class(**self.feat_extract_dict )
self.assertTrue(hasattr(__UpperCAmelCase , 'feature_size' ) )
self.assertTrue(hasattr(__UpperCAmelCase , 'sampling_rate' ) )
self.assertTrue(hasattr(__UpperCAmelCase , 'padding_value' ) )
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ :Optional[Any] = self.feat_extract_tester.prepare_inputs_for_common()
lowerCAmelCase__ :Optional[int] = self.feature_extraction_class(**self.feat_extract_dict )
lowerCAmelCase__ :Union[str, Any] = feat_extract.model_input_names[0]
lowerCAmelCase__ :List[Any] = BatchFeature({input_name: speech_inputs} )
        self.assertTrue(all(len(x ) == len(y ) for x, y in zip(speech_inputs , processed_features[input_name] ) ) )
lowerCAmelCase__ :Optional[Any] = self.feat_extract_tester.prepare_inputs_for_common(equal_length=__UpperCAmelCase )
lowerCAmelCase__ :Optional[Any] = BatchFeature({input_name: speech_inputs} , tensor_type='np' )
lowerCAmelCase__ :Tuple = processed_features[input_name]
if len(batch_features_input.shape ) < 3:
lowerCAmelCase__ :Tuple = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.feature_size) )
@require_torch
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ :Dict = self.feat_extract_tester.prepare_inputs_for_common(equal_length=__UpperCAmelCase )
lowerCAmelCase__ :Dict = self.feature_extraction_class(**self.feat_extract_dict )
lowerCAmelCase__ :Tuple = feat_extract.model_input_names[0]
lowerCAmelCase__ :int = BatchFeature({input_name: speech_inputs} , tensor_type='pt' )
lowerCAmelCase__ :List[Any] = processed_features[input_name]
if len(batch_features_input.shape ) < 3:
lowerCAmelCase__ :Dict = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.feature_size) )
@require_tf
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ :List[str] = self.feat_extract_tester.prepare_inputs_for_common(equal_length=__UpperCAmelCase )
lowerCAmelCase__ :Optional[Any] = self.feature_extraction_class(**self.feat_extract_dict )
lowerCAmelCase__ :List[Any] = feat_extract.model_input_names[0]
lowerCAmelCase__ :str = BatchFeature({input_name: speech_inputs} , tensor_type='tf' )
lowerCAmelCase__ :Any = processed_features[input_name]
if len(batch_features_input.shape ) < 3:
lowerCAmelCase__ :Union[str, Any] = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.feature_size) )
def snake_case ( self , __UpperCAmelCase=False ):
'''simple docstring'''
def _inputs_have_equal_length(__UpperCAmelCase ):
lowerCAmelCase__ :int = len(input[0] )
for input_slice in input[1:]:
if len(__UpperCAmelCase ) != length:
return False
return True
        def _inputs_are_equal(input_1 , input_2 ):
            if len(input_1 ) != len(input_2 ):
                return False
            for input_slice_1, input_slice_2 in zip(input_1 , input_2 ):
                if not np.allclose(np.asarray(input_slice_1 ) , np.asarray(input_slice_2 ) , atol=1E-3 ):
                    return False
            return True
lowerCAmelCase__ :Union[str, Any] = self.feature_extraction_class(**self.feat_extract_dict )
lowerCAmelCase__ :int = self.feat_extract_tester.prepare_inputs_for_common(numpify=__UpperCAmelCase )
lowerCAmelCase__ :List[Any] = feat_extract.model_input_names[0]
lowerCAmelCase__ :List[Any] = BatchFeature({input_name: speech_inputs} )
lowerCAmelCase__ :List[Any] = self.feat_extract_tester.seq_length_diff
lowerCAmelCase__ :List[str] = self.feat_extract_tester.max_seq_length + pad_diff
lowerCAmelCase__ :Tuple = self.feat_extract_tester.min_seq_length
lowerCAmelCase__ :Optional[Any] = self.feat_extract_tester.batch_size
lowerCAmelCase__ :List[str] = self.feat_extract_tester.feature_size
# test padding for List[int] + numpy
lowerCAmelCase__ :Tuple = feat_extract.pad(__UpperCAmelCase , padding=__UpperCAmelCase )
lowerCAmelCase__ :Tuple = input_a[input_name]
lowerCAmelCase__ :List[str] = feat_extract.pad(__UpperCAmelCase , padding='longest' )
lowerCAmelCase__ :Dict = input_a[input_name]
lowerCAmelCase__ :Optional[int] = feat_extract.pad(__UpperCAmelCase , padding='max_length' , max_length=len(speech_inputs[-1] ) )
lowerCAmelCase__ :Union[str, Any] = input_a[input_name]
lowerCAmelCase__ :Any = feat_extract.pad(__UpperCAmelCase , padding='longest' , return_tensors='np' )
lowerCAmelCase__ :Dict = input_a[input_name]
# max_length parameter has to be provided when setting `padding="max_length"`
with self.assertRaises(__UpperCAmelCase ):
feat_extract.pad(__UpperCAmelCase , padding='max_length' )[input_name]
lowerCAmelCase__ :Any = feat_extract.pad(
__UpperCAmelCase , padding='max_length' , max_length=__UpperCAmelCase , return_tensors='np' )
lowerCAmelCase__ :str = input_a[input_name]
self.assertFalse(_inputs_have_equal_length(__UpperCAmelCase ) )
self.assertTrue(_inputs_have_equal_length(__UpperCAmelCase ) )
self.assertTrue(_inputs_have_equal_length(__UpperCAmelCase ) )
self.assertTrue(_inputs_are_equal(__UpperCAmelCase , __UpperCAmelCase ) )
self.assertTrue(len(input_a[0] ) == pad_min_length )
self.assertTrue(len(input_a[1] ) == pad_min_length + pad_diff )
self.assertTrue(input_a.shape[:2] == (batch_size, len(input_a[0] )) )
self.assertTrue(input_a.shape[:2] == (batch_size, pad_max_length) )
if feature_size > 1:
self.assertTrue(input_a.shape[2] == input_a.shape[2] == feature_size )
# test padding for `pad_to_multiple_of` for List[int] + numpy
lowerCAmelCase__ :Any = feat_extract.pad(__UpperCAmelCase , pad_to_multiple_of=1_0 )
lowerCAmelCase__ :List[str] = input_a[input_name]
lowerCAmelCase__ :str = feat_extract.pad(__UpperCAmelCase , padding='longest' , pad_to_multiple_of=1_0 )
lowerCAmelCase__ :Tuple = input_a[input_name]
lowerCAmelCase__ :List[str] = feat_extract.pad(
__UpperCAmelCase , padding='max_length' , pad_to_multiple_of=1_0 , max_length=__UpperCAmelCase )
lowerCAmelCase__ :Any = input_a[input_name]
lowerCAmelCase__ :Union[str, Any] = feat_extract.pad(
__UpperCAmelCase , padding='max_length' , pad_to_multiple_of=1_0 , max_length=__UpperCAmelCase , return_tensors='np' , )
lowerCAmelCase__ :Union[str, Any] = input_a[input_name]
        self.assertTrue(all(len(x ) % 1_0 == 0 for x in input_a ) )
self.assertTrue(_inputs_are_equal(__UpperCAmelCase , __UpperCAmelCase ) )
lowerCAmelCase__ :Any = pad_max_length if pad_max_length % 1_0 == 0 else (pad_max_length // 1_0 + 1) * 1_0
self.assertTrue(all(len(__UpperCAmelCase ) == expected_mult_pad_length for x in input_a ) )
self.assertEqual(input_a.shape[:2] , (batch_size, expected_mult_pad_length) )
if feature_size > 1:
self.assertTrue(input_a.shape[2] == feature_size )
# Check padding value is correct
lowerCAmelCase__ :List[Any] = (np.ones(self.feat_extract_tester.feature_size ) * feat_extract.padding_value).sum()
self.assertTrue(
abs(np.asarray(input_a[0] )[pad_min_length:].sum() - padding_vector_sum * (pad_max_length - pad_min_length) )
< 1E-3 )
self.assertTrue(
abs(
np.asarray(input_a[1] )[pad_min_length + pad_diff :].sum()
- padding_vector_sum * (pad_max_length - pad_min_length - pad_diff) )
< 1E-3 )
self.assertTrue(
abs(
np.asarray(input_a[2] )[pad_min_length + 2 * pad_diff :].sum()
- padding_vector_sum * (pad_max_length - pad_min_length - 2 * pad_diff) )
< 1E-3 )
self.assertTrue(
abs(input_a[0, pad_min_length:].sum() - padding_vector_sum * (pad_max_length - pad_min_length) ) < 1E-3 )
self.assertTrue(
abs(input_a[0, pad_min_length:].sum() - padding_vector_sum * (expected_mult_pad_length - pad_min_length) )
< 1E-3 )
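    # Added worked example (not in the original test): the `pad_to_multiple_of` check above
    # rounds the padded length up to the next multiple of 10, so a longest sequence of
    # 13 frames is padded to (13 // 10 + 1) * 10 = 20 frames, while one of exactly 20 stays at 20.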
def snake_case ( self , __UpperCAmelCase=False ):
'''simple docstring'''
def _inputs_have_equal_length(__UpperCAmelCase ):
lowerCAmelCase__ :List[str] = len(input[0] )
for input_slice in input[1:]:
if len(__UpperCAmelCase ) != length:
return False
return True
def _inputs_are_equal(__UpperCAmelCase , __UpperCAmelCase ):
if len(__UpperCAmelCase ) != len(__UpperCAmelCase ):
return False
for input_slice_a, input_slice_a in zip(__UpperCAmelCase , __UpperCAmelCase ):
if not np.allclose(np.asarray(__UpperCAmelCase ) , np.asarray(__UpperCAmelCase ) , atol=1E-3 ):
return False
return True
lowerCAmelCase__ :Any = self.feature_extraction_class(**self.feat_extract_dict )
lowerCAmelCase__ :Union[str, Any] = self.feat_extract_tester.prepare_inputs_for_common(numpify=__UpperCAmelCase )
lowerCAmelCase__ :Union[str, Any] = feat_extract.model_input_names[0]
lowerCAmelCase__ :str = BatchFeature({input_name: speech_inputs} )
# truncate to smallest
lowerCAmelCase__ :Union[str, Any] = feat_extract.pad(
__UpperCAmelCase , padding='max_length' , max_length=len(speech_inputs[0] ) , truncation=__UpperCAmelCase )
lowerCAmelCase__ :Optional[int] = input_a[input_name]
lowerCAmelCase__ :Optional[Any] = feat_extract.pad(__UpperCAmelCase , padding='max_length' , max_length=len(speech_inputs[0] ) )
lowerCAmelCase__ :Any = input_a[input_name]
self.assertTrue(_inputs_have_equal_length(__UpperCAmelCase ) )
self.assertFalse(_inputs_have_equal_length(__UpperCAmelCase ) )
# truncate to smallest with np
lowerCAmelCase__ :Any = feat_extract.pad(
__UpperCAmelCase , padding='max_length' , max_length=len(speech_inputs[0] ) , return_tensors='np' , truncation=__UpperCAmelCase , )
lowerCAmelCase__ :List[Any] = input_a[input_name]
lowerCAmelCase__ :Any = feat_extract.pad(
__UpperCAmelCase , padding='max_length' , max_length=len(speech_inputs[0] ) , return_tensors='np' )
lowerCAmelCase__ :Tuple = input_a[input_name]
self.assertTrue(_inputs_have_equal_length(__UpperCAmelCase ) )
self.assertTrue(input_a.shape[1] == len(speech_inputs[0] ) )
# since truncation forces padding to be smaller than longest input
# function can't return `np.ndarray`, but has to return list
self.assertFalse(_inputs_have_equal_length(__UpperCAmelCase ) )
# truncate to middle
lowerCAmelCase__ :Union[str, Any] = feat_extract.pad(
__UpperCAmelCase , padding='max_length' , max_length=len(speech_inputs[1] ) , truncation=__UpperCAmelCase , return_tensors='np' , )
lowerCAmelCase__ :str = input_a[input_name]
lowerCAmelCase__ :Dict = feat_extract.pad(
__UpperCAmelCase , padding='max_length' , max_length=len(speech_inputs[1] ) , truncation=__UpperCAmelCase )
lowerCAmelCase__ :Optional[int] = input_a[input_name]
lowerCAmelCase__ :Union[str, Any] = feat_extract.pad(
__UpperCAmelCase , padding='max_length' , max_length=len(speech_inputs[1] ) , return_tensors='np' )
lowerCAmelCase__ :Union[str, Any] = input_a[input_name]
self.assertTrue(input_a.shape[1] == len(speech_inputs[1] ) )
self.assertTrue(_inputs_have_equal_length(__UpperCAmelCase ) )
self.assertTrue(_inputs_have_equal_length(__UpperCAmelCase ) )
self.assertTrue(_inputs_are_equal(__UpperCAmelCase , __UpperCAmelCase ) )
# since truncation forces padding to be smaller than longest input
# function can't return `np.ndarray`, but has to return list
self.assertFalse(_inputs_have_equal_length(__UpperCAmelCase ) )
self.assertTrue(len(input_a[-1] ) == len(speech_inputs[-1] ) )
# padding has to be max_length when setting `truncation=True`
with self.assertRaises(__UpperCAmelCase ):
feat_extract.pad(__UpperCAmelCase , truncation=__UpperCAmelCase )[input_name]
# padding has to be max_length when setting `truncation=True`
with self.assertRaises(__UpperCAmelCase ):
feat_extract.pad(__UpperCAmelCase , padding='longest' , truncation=__UpperCAmelCase )[input_name]
# padding has to be max_length when setting `truncation=True`
with self.assertRaises(__UpperCAmelCase ):
feat_extract.pad(__UpperCAmelCase , padding='longest' , truncation=__UpperCAmelCase )[input_name]
# max_length parameter has to be provided when setting `truncation=True` and padding="max_length"
with self.assertRaises(__UpperCAmelCase ):
feat_extract.pad(__UpperCAmelCase , padding='max_length' , truncation=__UpperCAmelCase )[input_name]
# test truncation for `pad_to_multiple_of` for List[int] + numpy
lowerCAmelCase__ :Dict = 1_2
lowerCAmelCase__ :Any = feat_extract.pad(
__UpperCAmelCase , padding='max_length' , max_length=len(speech_inputs[0] ) , pad_to_multiple_of=__UpperCAmelCase , truncation=__UpperCAmelCase , )
lowerCAmelCase__ :Dict = input_a[input_name]
lowerCAmelCase__ :Dict = feat_extract.pad(
__UpperCAmelCase , padding='max_length' , max_length=len(speech_inputs[0] ) , pad_to_multiple_of=__UpperCAmelCase , )
lowerCAmelCase__ :List[Any] = input_a[input_name]
# retrieve expected_length as multiple of pad_to_multiple_of
lowerCAmelCase__ :Optional[Any] = len(speech_inputs[0] )
if expected_length % pad_to_multiple_of != 0:
lowerCAmelCase__ :Optional[int] = ((len(speech_inputs[0] ) // pad_to_multiple_of) + 1) * pad_to_multiple_of
self.assertTrue(len(input_a[0] ) == expected_length )
self.assertTrue(_inputs_have_equal_length(__UpperCAmelCase ) )
self.assertFalse(_inputs_have_equal_length(__UpperCAmelCase ) )
def snake_case ( self ):
'''simple docstring'''
self._check_padding(numpify=__UpperCAmelCase )
def snake_case ( self ):
'''simple docstring'''
self._check_padding(numpify=__UpperCAmelCase )
def snake_case ( self ):
'''simple docstring'''
self._check_truncation(numpify=__UpperCAmelCase )
def snake_case ( self ):
'''simple docstring'''
self._check_truncation(numpify=__UpperCAmelCase )
@require_torch
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ :int = self.feature_extraction_class(**self.feat_extract_dict )
lowerCAmelCase__ :Optional[Any] = self.feat_extract_tester.prepare_inputs_for_common()
lowerCAmelCase__ :Optional[int] = feat_extract.model_input_names[0]
lowerCAmelCase__ :Optional[Any] = BatchFeature({input_name: speech_inputs} )
lowerCAmelCase__ :int = feat_extract.pad(__UpperCAmelCase , padding='longest' , return_tensors='np' )[input_name]
lowerCAmelCase__ :Tuple = feat_extract.pad(__UpperCAmelCase , padding='longest' , return_tensors='pt' )[input_name]
self.assertTrue(abs(input_np.astype(np.floataa ).sum() - input_pt.numpy().astype(np.floataa ).sum() ) < 1E-2 )
@require_tf
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ :Tuple = self.feature_extraction_class(**self.feat_extract_dict )
lowerCAmelCase__ :Optional[Any] = self.feat_extract_tester.prepare_inputs_for_common()
lowerCAmelCase__ :Union[str, Any] = feat_extract.model_input_names[0]
lowerCAmelCase__ :int = BatchFeature({input_name: speech_inputs} )
lowerCAmelCase__ :Optional[int] = feat_extract.pad(__UpperCAmelCase , padding='longest' , return_tensors='np' )[input_name]
lowerCAmelCase__ :Tuple = feat_extract.pad(__UpperCAmelCase , padding='longest' , return_tensors='tf' )[input_name]
self.assertTrue(abs(input_np.astype(np.floataa ).sum() - input_tf.numpy().astype(np.floataa ).sum() ) < 1E-2 )
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ :List[Any] = self.feat_extract_dict
lowerCAmelCase__ :Optional[int] = True
lowerCAmelCase__ :List[str] = self.feature_extraction_class(**__UpperCAmelCase )
lowerCAmelCase__ :Dict = self.feat_extract_tester.prepare_inputs_for_common()
lowerCAmelCase__ :int = [len(__UpperCAmelCase ) for x in speech_inputs]
lowerCAmelCase__ :List[Any] = feat_extract.model_input_names[0]
lowerCAmelCase__ :Optional[int] = BatchFeature({input_name: speech_inputs} )
lowerCAmelCase__ :Dict = feat_extract.pad(__UpperCAmelCase , padding='longest' , return_tensors='np' )
self.assertIn('attention_mask' , __UpperCAmelCase )
self.assertListEqual(list(processed.attention_mask.shape ) , list(processed[input_name].shape[:2] ) )
self.assertListEqual(processed.attention_mask.sum(-1 ).tolist() , __UpperCAmelCase )
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ :Tuple = self.feat_extract_dict
lowerCAmelCase__ :int = True
lowerCAmelCase__ :Any = self.feature_extraction_class(**__UpperCAmelCase )
lowerCAmelCase__ :Any = self.feat_extract_tester.prepare_inputs_for_common()
lowerCAmelCase__ :List[str] = [len(__UpperCAmelCase ) for x in speech_inputs]
lowerCAmelCase__ :List[Any] = feat_extract.model_input_names[0]
lowerCAmelCase__ :int = BatchFeature({input_name: speech_inputs} )
lowerCAmelCase__ :int = min(__UpperCAmelCase )
lowerCAmelCase__ :List[Any] = feat_extract.pad(
__UpperCAmelCase , padding='max_length' , max_length=__UpperCAmelCase , truncation=__UpperCAmelCase , return_tensors='np' )
self.assertIn('attention_mask' , __UpperCAmelCase )
self.assertListEqual(
list(processed_pad.attention_mask.shape ) , [processed_pad[input_name].shape[0], max_length] )
self.assertListEqual(
processed_pad.attention_mask[:, :max_length].sum(-1 ).tolist() , [max_length for x in speech_inputs] )
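        # Added note: because every sequence is truncated to the common minimum length above,
        # each attention-mask row contains only ones, so every row sums to exactly `max_length`.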
| 93
|
'''simple docstring'''
from __future__ import annotations
def a ( lowerCamelCase__ , lowerCamelCase__ ):
    '''Split number_of_bytes into `partitions` contiguous, 1-indexed byte ranges; the last range absorbs any remainder.'''
if partitions <= 0:
raise ValueError("""partitions must be a positive number!""" )
if partitions > number_of_bytes:
raise ValueError("""partitions can not > number_of_bytes!""" )
A_ : int = number_of_bytes // partitions
A_ : Union[str, Any] = []
for i in range(lowerCamelCase__ ):
A_ : Dict = i * bytes_per_partition + 1
A_ : Tuple = (
number_of_bytes if i == partitions - 1 else (i + 1) * bytes_per_partition
)
allocation_list.append(f'{start_bytes}-{end_bytes}' )
return allocation_list
if __name__ == "__main__":
import doctest
doctest.testmod()
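# Added usage sketch (the helper keeps the obfuscated name `a` used in this snippet):
#   a(16, 4)  -> ['1-4', '5-8', '9-12', '13-16']
#   a(10, 3)  -> ['1-3', '4-6', '7-10']   # the last range absorbs the remainder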
| 667
| 0
|
import os
def __lowerCAmelCase ( ):
__lowerCAmelCase = os.path.join(os.path.dirname(__snake_case ) , "num.txt" )
with open(__snake_case ) as file_hand:
return str(sum(int(__snake_case ) for line in file_hand ) )[:10]
if __name__ == "__main__":
print(solution())
| 290
|
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
from ...utils import logging
from ..auto import CONFIG_MAPPING
lowerCamelCase : Union[str, Any] = logging.get_logger(__name__)
lowerCamelCase : Any = {
'''Salesforce/instruct-blip-flan-t5''': '''https://huggingface.co/Salesforce/instruct-blip-flan-t5/resolve/main/config.json''',
}
class _UpperCamelCase (a_ ):
snake_case_ = """instructblip_vision_model"""
def __init__( self , __UpperCamelCase=1_4_0_8 , __UpperCamelCase=6_1_4_4 , __UpperCamelCase=3_9 , __UpperCamelCase=1_6 , __UpperCamelCase=2_2_4 , __UpperCamelCase=1_4 , __UpperCamelCase="gelu" , __UpperCamelCase=1e-6 , __UpperCamelCase=0.0 , __UpperCamelCase=1e-10 , __UpperCamelCase=True , **__UpperCamelCase , )-> Union[str, Any]:
super().__init__(**__UpperCamelCase )
__lowerCAmelCase = hidden_size
__lowerCAmelCase = intermediate_size
__lowerCAmelCase = num_hidden_layers
__lowerCAmelCase = num_attention_heads
__lowerCAmelCase = patch_size
__lowerCAmelCase = image_size
__lowerCAmelCase = initializer_range
__lowerCAmelCase = attention_dropout
__lowerCAmelCase = layer_norm_eps
__lowerCAmelCase = hidden_act
__lowerCAmelCase = qkv_bias
@classmethod
def __UpperCAmelCase ( cls , __UpperCamelCase , **__UpperCamelCase )-> "PretrainedConfig":
cls._set_token_in_kwargs(__UpperCamelCase )
__lowerCAmelCase , __lowerCAmelCase = cls.get_config_dict(__UpperCamelCase , **__UpperCamelCase )
# get the vision config dict if we are loading from InstructBlipConfig
if config_dict.get("model_type" ) == "instructblip":
__lowerCAmelCase = config_dict["vision_config"]
if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """
F"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(__UpperCamelCase , **__UpperCamelCase )
class _UpperCamelCase (a_ ):
snake_case_ = """instructblip_qformer"""
def __init__( self , __UpperCamelCase=3_0_5_2_2 , __UpperCamelCase=7_6_8 , __UpperCamelCase=1_2 , __UpperCamelCase=1_2 , __UpperCamelCase=3_0_7_2 , __UpperCamelCase="gelu" , __UpperCamelCase=0.1 , __UpperCamelCase=0.1 , __UpperCamelCase=5_1_2 , __UpperCamelCase=0.0_2 , __UpperCamelCase=1e-12 , __UpperCamelCase=0 , __UpperCamelCase="absolute" , __UpperCamelCase=2 , __UpperCamelCase=1_4_0_8 , **__UpperCamelCase , )-> Any:
super().__init__(pad_token_id=__UpperCamelCase , **__UpperCamelCase )
__lowerCAmelCase = vocab_size
__lowerCAmelCase = hidden_size
__lowerCAmelCase = num_hidden_layers
__lowerCAmelCase = num_attention_heads
__lowerCAmelCase = hidden_act
__lowerCAmelCase = intermediate_size
__lowerCAmelCase = hidden_dropout_prob
__lowerCAmelCase = attention_probs_dropout_prob
__lowerCAmelCase = max_position_embeddings
__lowerCAmelCase = initializer_range
__lowerCAmelCase = layer_norm_eps
__lowerCAmelCase = position_embedding_type
__lowerCAmelCase = cross_attention_frequency
__lowerCAmelCase = encoder_hidden_size
@classmethod
def __UpperCAmelCase ( cls , __UpperCamelCase , **__UpperCamelCase )-> "PretrainedConfig":
cls._set_token_in_kwargs(__UpperCamelCase )
__lowerCAmelCase , __lowerCAmelCase = cls.get_config_dict(__UpperCamelCase , **__UpperCamelCase )
# get the qformer config dict if we are loading from InstructBlipConfig
if config_dict.get("model_type" ) == "instructblip":
__lowerCAmelCase = config_dict["qformer_config"]
if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """
F"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(__UpperCamelCase , **__UpperCamelCase )
class _UpperCamelCase (a_ ):
snake_case_ = """instructblip"""
snake_case_ = True
def __init__( self , __UpperCamelCase=None , __UpperCamelCase=None , __UpperCamelCase=None , __UpperCamelCase=3_2 , **__UpperCamelCase )-> Tuple:
super().__init__(**__UpperCamelCase )
if vision_config is None:
__lowerCAmelCase = {}
logger.info("vision_config is None. initializing the InstructBlipVisionConfig with default values." )
if qformer_config is None:
__lowerCAmelCase = {}
logger.info("qformer_config is None. Initializing the InstructBlipQFormerConfig with default values." )
if text_config is None:
__lowerCAmelCase = {}
logger.info("text_config is None. Initializing the text config with default values (`OPTConfig`)." )
__lowerCAmelCase = InstructBlipVisionConfig(**__UpperCamelCase )
__lowerCAmelCase = InstructBlipQFormerConfig(**__UpperCamelCase )
__lowerCAmelCase = text_config["model_type"] if "model_type" in text_config else "opt"
__lowerCAmelCase = CONFIG_MAPPING[text_model_type](**__UpperCamelCase )
__lowerCAmelCase = self.text_config.tie_word_embeddings
__lowerCAmelCase = self.text_config.is_encoder_decoder
__lowerCAmelCase = num_query_tokens
__lowerCAmelCase = self.vision_config.hidden_size
__lowerCAmelCase = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
__lowerCAmelCase = 1.0
__lowerCAmelCase = 0.0_2
@classmethod
def __UpperCAmelCase ( cls , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , **__UpperCamelCase , )-> int:
return cls(
vision_config=vision_config.to_dict() , qformer_config=qformer_config.to_dict() , text_config=text_config.to_dict() , **__UpperCamelCase , )
def __UpperCAmelCase ( self )-> Dict:
__lowerCAmelCase = copy.deepcopy(self.__dict__ )
__lowerCAmelCase = self.vision_config.to_dict()
__lowerCAmelCase = self.qformer_config.to_dict()
__lowerCAmelCase = self.text_config.to_dict()
__lowerCAmelCase = self.__class__.model_type
return output
| 290
| 1
|
'''simple docstring'''
from __future__ import annotations
from collections import deque
from collections.abc import Iterator
from dataclasses import dataclass
@dataclass
class lowercase_ :
"""simple docstring"""
__lowerCAmelCase = 42
__lowerCAmelCase = 42
class lowercase_ :
"""simple docstring"""
def __init__( self : Optional[int], UpperCamelCase__ : int ) -> Any:
_A = [[] for _ in range(UpperCamelCase__ )]
_A = size
def __getitem__( self : Dict, UpperCamelCase__ : int ) -> Iterator[Edge]:
return iter(self._graph[vertex] )
@property
def __UpperCAmelCase ( self : List[str] ) -> List[Any]:
return self._size
def __UpperCAmelCase ( self : str, UpperCamelCase__ : int, UpperCamelCase__ : int, UpperCamelCase__ : int ) -> List[Any]:
if weight not in (0, 1):
raise ValueError('Edge weight must be either 0 or 1.' )
if to_vertex < 0 or to_vertex >= self.size:
raise ValueError('Vertex indexes must be in [0; size).' )
self._graph[from_vertex].append(Edge(UpperCamelCase__, UpperCamelCase__ ) )
def __UpperCAmelCase ( self : List[str], UpperCamelCase__ : int, UpperCamelCase__ : int ) -> int | None:
_A = deque([start_vertex] )
_A = [None] * self.size
_A = 0
while queue:
_A = queue.popleft()
_A = distances[current_vertex]
if current_distance is None:
continue
for edge in self[current_vertex]:
_A = current_distance + edge.weight
_A = distances[edge.destination_vertex]
if (
isinstance(UpperCamelCase__, UpperCamelCase__ )
and new_distance >= dest_vertex_distance
):
continue
_A = new_distance
if edge.weight == 0:
queue.appendleft(edge.destination_vertex )
else:
queue.append(edge.destination_vertex )
if distances[finish_vertex] is None:
raise ValueError('No path from start_vertex to finish_vertex.' )
return distances[finish_vertex]
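# Added commentary: the method above is the classic 0-1 BFS. Weight-0 edges are pushed to the
# front of the deque and weight-1 edges to the back, so vertices come off the queue in
# non-decreasing distance order. On graphs whose edge weights are only 0 or 1 this yields the
# same shortest paths as Dijkstra's algorithm, but in O(V + E) time without a priority queue.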
if __name__ == "__main__":
import doctest
doctest.testmod()
| 107
|
import argparse
import json
import os
import re
import shutil
import torch
from transformers import BioGptConfig, BioGptForCausalLM
from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES
from transformers.tokenization_utils_base import TOKENIZER_CONFIG_FILE
from transformers.utils import WEIGHTS_NAME, logging
logging.set_verbosity_warning()
_snake_case : Union[str, Any] = 2
class _UpperCAmelCase :
"""simple docstring"""
def __init__( self : Tuple , *, # begin keyword-only arguments
lowerCAmelCase_ : str="<s>" , lowerCAmelCase_ : Dict="<pad>" , lowerCAmelCase_ : Any="</s>" , lowerCAmelCase_ : List[str]="<unk>" , lowerCAmelCase_ : Optional[Any]=None , ) -> Union[str, Any]:
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = bos, unk, pad, eos
__lowerCAmelCase = []
__lowerCAmelCase = []
__lowerCAmelCase = {}
__lowerCAmelCase = self.add_symbol(lowerCAmelCase_ )
__lowerCAmelCase = self.add_symbol(lowerCAmelCase_ )
__lowerCAmelCase = self.add_symbol(lowerCAmelCase_ )
__lowerCAmelCase = self.add_symbol(lowerCAmelCase_ )
if extra_special_symbols:
for s in extra_special_symbols:
self.add_symbol(lowerCAmelCase_ )
__lowerCAmelCase = len(self.symbols )
def __eq__( self : Dict , lowerCAmelCase_ : Dict ) -> str:
return self.indices == other.indices
def __getitem__( self : List[Any] , lowerCAmelCase_ : int ) -> Union[str, Any]:
if idx < len(self.symbols ):
return self.symbols[idx]
return self.unk_word
def __len__( self : Tuple ) -> List[Any]:
return len(self.symbols )
def __contains__( self : Optional[Any] , lowerCAmelCase_ : Dict ) -> Optional[int]:
return sym in self.indices
@classmethod
def lowercase ( cls : Dict , lowerCAmelCase_ : str ) -> str:
__lowerCAmelCase = cls()
d.add_from_file(lowerCAmelCase_ )
return d
def lowercase ( self : Union[str, Any] , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : int=1 , lowerCAmelCase_ : Any=False ) -> Optional[Any]:
if word in self.indices and not overwrite:
__lowerCAmelCase = self.indices[word]
__lowerCAmelCase = self.count[idx] + n
return idx
else:
__lowerCAmelCase = len(self.symbols )
__lowerCAmelCase = idx
self.symbols.append(lowerCAmelCase_ )
self.count.append(lowerCAmelCase_ )
return idx
def lowercase ( self : str , lowerCAmelCase_ : Union[str, Any] ) -> Dict:
return 0
def lowercase ( self : Tuple , lowerCAmelCase_ : Union[str, Any] ) -> int:
if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ):
try:
with open(lowerCAmelCase_ , 'r' , encoding='utf-8' ) as fd:
self.add_from_file(lowerCAmelCase_ )
except FileNotFoundError as fnfe:
raise fnfe
except UnicodeError:
raise Exception('Incorrect encoding detected in {}, please rebuild the dataset'.format(lowerCAmelCase_ ) )
return
__lowerCAmelCase = f.readlines()
__lowerCAmelCase = self._load_meta(lowerCAmelCase_ )
for line in lines[indices_start_line:]:
try:
__lowerCAmelCase , __lowerCAmelCase = line.rstrip().rsplit(' ' , 1 )
if field == "#fairseq:overwrite":
__lowerCAmelCase = True
__lowerCAmelCase , __lowerCAmelCase = line.rsplit(' ' , 1 )
else:
__lowerCAmelCase = False
__lowerCAmelCase = int(lowerCAmelCase_ )
__lowerCAmelCase = line
if word in self and not overwrite:
raise RuntimeError(
'Duplicate word found when loading Dictionary: \'{}\'. '
'Duplicate words can overwrite earlier ones by adding the '
'#fairseq:overwrite flag at the end of the corresponding row '
'in the dictionary file. If using the Camembert model, please '
'download an updated copy of the model file.'.format(lowerCAmelCase_ ) )
self.add_symbol(lowerCAmelCase_ , n=lowerCAmelCase_ , overwrite=lowerCAmelCase_ )
except ValueError:
raise ValueError('Incorrect dictionary format, expected \'<token> <cnt> [flags]\'' )
def a_ ( lowerCAmelCase_ : List[str] ):
# (1) remove word breaking symbol, (2) add word ending symbol where the word is not broken up,
# e.g.: d = {'le@@': 5, 'tt@@': 6, 'er': 7} => {'le': 5, 'tt': 6, 'er</w>': 7}
__lowerCAmelCase = dict((re.sub(R'@@$', '', lowerCAmelCase_ ), v) if k.endswith('@@' ) else (re.sub(R'$', '</w>', lowerCAmelCase_ ), v) for k, v in d.items() )
__lowerCAmelCase = '<s> <pad> </s> <unk>'.split()
# restore the special tokens
for k in keep_keys:
del da[F"""{k}</w>"""]
__lowerCAmelCase = d[k] # restore
return da
def a_ ( lowerCAmelCase_ : Union[str, Any], lowerCAmelCase_ : List[str] ):
# prep
if not os.path.exists(lowerCAmelCase_ ):
raise ValueError(F"""path {biogpt_checkpoint_path} does not exist!""" )
os.makedirs(lowerCAmelCase_, exist_ok=lowerCAmelCase_ )
print(F"""Writing results to {pytorch_dump_folder_path}""" )
# handle various types of models
__lowerCAmelCase = os.path.join(lowerCAmelCase_, 'checkpoint.pt' )
if not os.path.isfile(lowerCAmelCase_ ):
raise ValueError(F"""path to the file {checkpoint_file} does not exist!""" )
__lowerCAmelCase = torch.load(lowerCAmelCase_, map_location='cpu' )
__lowerCAmelCase = chkpt['cfg']['model']
# dicts
__lowerCAmelCase = os.path.join(lowerCAmelCase_, 'dict.txt' )
if not os.path.isfile(lowerCAmelCase_ ):
raise ValueError(F"""path to the file {dict_file} does not exist!""" )
__lowerCAmelCase = Dictionary.load(lowerCAmelCase_ )
__lowerCAmelCase = rewrite_dict_keys(src_dict.indices )
__lowerCAmelCase = len(lowerCAmelCase_ )
__lowerCAmelCase = os.path.join(lowerCAmelCase_, VOCAB_FILES_NAMES['vocab_file'] )
print(F"""Generating {src_vocab_file} of {src_vocab_size} records""" )
with open(lowerCAmelCase_, 'w', encoding='utf-8' ) as f:
f.write(json.dumps(lowerCAmelCase_, ensure_ascii=lowerCAmelCase_, indent=lowerCAmelCase_ ) )
# merges_file (bpecodes)
__lowerCAmelCase = os.path.join(lowerCAmelCase_, 'bpecodes' )
if not os.path.isfile(lowerCAmelCase_ ):
raise ValueError(F"""path to the file {bpecodes_file} does not exist!""" )
__lowerCAmelCase = os.path.join(lowerCAmelCase_, VOCAB_FILES_NAMES['merges_file'] )
shutil.copyfile(lowerCAmelCase_, lowerCAmelCase_ )
# model config
__lowerCAmelCase = os.path.join(lowerCAmelCase_, 'config.json' )
__lowerCAmelCase = {
'activation_dropout': args['activation_dropout'],
'architectures': ['BioGptForCausalLM'],
'attention_probs_dropout_prob': args['attention_dropout'],
'bos_token_id': 0,
'eos_token_id': 2,
'hidden_act': args['activation_fn'],
'hidden_dropout_prob': args['dropout'],
'hidden_size': args['decoder_embed_dim'],
'initializer_range': 0.02,
'intermediate_size': args['decoder_ffn_embed_dim'],
'layer_norm_eps': 1E-12,
'layerdrop': args['decoder_layerdrop'],
'max_position_embeddings': args['max_target_positions'],
'model_type': 'biogpt',
'num_attention_heads': args['decoder_attention_heads'],
'num_hidden_layers': args['decoder_layers'],
'pad_token_id': 1,
'scale_embedding': not args['no_scale_embedding'],
'tie_word_embeddings': args['share_decoder_input_output_embed'],
'vocab_size': src_vocab_size,
}
# good hparam defaults to start with
print(F"""Generating {biogpt_model_config_file}""" )
with open(lowerCAmelCase_, 'w', encoding='utf-8' ) as f:
f.write(json.dumps(lowerCAmelCase_, ensure_ascii=lowerCAmelCase_, indent=lowerCAmelCase_ ) )
# tokenizer config
__lowerCAmelCase = os.path.join(lowerCAmelCase_, lowerCAmelCase_ )
__lowerCAmelCase = {
'bos_token': '<s>',
'eos_token': '</s>',
'model_max_length': 1024,
'pad_token': '<pad>',
'special_tokens_map_file': None,
'tokenizer_class': 'BioGptTokenizer',
'unk_token': '<unk>',
}
print(F"""Generating {biogpt_tokenizer_config_file}""" )
with open(lowerCAmelCase_, 'w', encoding='utf-8' ) as f:
f.write(json.dumps(lowerCAmelCase_, ensure_ascii=lowerCAmelCase_, indent=lowerCAmelCase_ ) )
# model
__lowerCAmelCase = chkpt['model']
# remove unneeded keys
__lowerCAmelCase = [
'decoder.version',
]
for k in ignore_keys:
model_state_dict.pop(lowerCAmelCase_, lowerCAmelCase_ )
__lowerCAmelCase = list(model_state_dict.keys() )
for layer_name in layer_names:
if layer_name.endswith('output_projection.weight' ):
__lowerCAmelCase = model_state_dict.pop(lowerCAmelCase_ )
else:
__lowerCAmelCase = model_state_dict.pop(lowerCAmelCase_ )
__lowerCAmelCase = BioGptConfig.from_pretrained(lowerCAmelCase_ )
__lowerCAmelCase = BioGptForCausalLM(lowerCAmelCase_ )
# check that it loads ok
model_new.load_state_dict(lowerCAmelCase_ )
# save
__lowerCAmelCase = os.path.join(lowerCAmelCase_, lowerCAmelCase_ )
print(F"""Generating {pytorch_weights_dump_path}""" )
torch.save(lowerCAmelCase_, lowerCAmelCase_ )
print('Conversion is done!' )
if __name__ == "__main__":
_snake_case : str = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--biogpt_checkpoint_path',
default=None,
type=str,
required=True,
help=(
'Path to the official PyTorch checkpoint file which is expected to reside in the dump dir with dicts,'
' bpecodes, etc.'
),
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
_snake_case : int = parser.parse_args()
convert_biogpt_checkpoint_to_pytorch(args.biogpt_checkpoint_path, args.pytorch_dump_folder_path)
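# Example invocation (added sketch; the script name and paths below are placeholders):
#   python convert_biogpt_checkpoint.py \
#       --biogpt_checkpoint_path /path/to/fairseq_biogpt_dump \
#       --pytorch_dump_folder_path /path/to/hf_output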
| 53
| 0
|
'''simple docstring'''
def _lowerCAmelCase ( _lowerCAmelCase )-> list:
__UpperCAmelCase = False
while is_sorted is False: # Until all the indices are traversed keep looping
__UpperCAmelCase = True
for i in range(0 , len(_lowerCAmelCase ) - 1 , 2 ): # iterating over all even indices
if input_list[i] > input_list[i + 1]:
__UpperCAmelCase , __UpperCAmelCase = input_list[i + 1], input_list[i]
# swapping if elements not in order
__UpperCAmelCase = False
for i in range(1 , len(_lowerCAmelCase ) - 1 , 2 ): # iterating over all odd indices
if input_list[i] > input_list[i + 1]:
__UpperCAmelCase , __UpperCAmelCase = input_list[i + 1], input_list[i]
# swapping if elements not in order
__UpperCAmelCase = False
return input_list
if __name__ == "__main__":
print("""Enter list to be sorted""")
_A: Dict = [int(x) for x in input().split()]
# inputing elements of the list in one line
_A: str = odd_even_sort(input_list)
print("""The sorted list is""")
print(sorted_list)
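# Added trace (illustrative): odd_even_sort([3, 2, 1]) swaps on even indices to get [2, 3, 1],
# then on odd indices to get [2, 1, 3]; the next even pass yields [1, 2, 3], and a final pass
# with no swaps confirms the list is sorted.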
| 617
|
'''simple docstring'''
import math
import numpy as np
import qiskit
from qiskit import Aer, ClassicalRegister, QuantumCircuit, QuantumRegister, execute
def _lowerCAmelCase ( _lowerCAmelCase = 3 )-> qiskit.result.counts.Counts:
if isinstance(_lowerCAmelCase , _lowerCAmelCase ):
        raise TypeError('number of qubits must be an integer.' )
if number_of_qubits <= 0:
raise ValueError('number of qubits must be > 0.' )
if math.floor(_lowerCAmelCase ) != number_of_qubits:
raise ValueError('number of qubits must be exact integer.' )
if number_of_qubits > 10:
        raise ValueError('number of qubits too large to simulate (>10).' )
__UpperCAmelCase = QuantumRegister(_lowerCAmelCase , 'qr' )
__UpperCAmelCase = ClassicalRegister(_lowerCAmelCase , 'cr' )
__UpperCAmelCase = QuantumCircuit(_lowerCAmelCase , _lowerCAmelCase )
__UpperCAmelCase = number_of_qubits
for i in range(_lowerCAmelCase ):
quantum_circuit.h(number_of_qubits - i - 1 )
counter -= 1
for j in range(_lowerCAmelCase ):
quantum_circuit.cp(np.pi / 2 ** (counter - j) , _lowerCAmelCase , _lowerCAmelCase )
for k in range(number_of_qubits // 2 ):
quantum_circuit.swap(_lowerCAmelCase , number_of_qubits - k - 1 )
# measure all the qubits
quantum_circuit.measure(_lowerCAmelCase , _lowerCAmelCase )
# simulate with 10000 shots
__UpperCAmelCase = Aer.get_backend('qasm_simulator' )
__UpperCAmelCase = execute(_lowerCAmelCase , _lowerCAmelCase , shots=1_00_00 )
return job.result().get_counts(_lowerCAmelCase )
if __name__ == "__main__":
print(
F"""Total count for quantum fourier transform state is: \
{quantum_fourier_transform(3)}"""
)
| 617
| 1
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tensorflow_text_available, is_torch_available
UpperCamelCase__ = {
'''configuration_ernie''': ['''ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ErnieConfig''', '''ErnieOnnxConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase__ = [
'''ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ErnieForCausalLM''',
'''ErnieForMaskedLM''',
'''ErnieForMultipleChoice''',
'''ErnieForNextSentencePrediction''',
'''ErnieForPreTraining''',
'''ErnieForQuestionAnswering''',
'''ErnieForSequenceClassification''',
'''ErnieForTokenClassification''',
'''ErnieModel''',
'''ErniePreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_ernie import ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP, ErnieConfig, ErnieOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ernie import (
ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST,
ErnieForCausalLM,
ErnieForMaskedLM,
ErnieForMultipleChoice,
ErnieForNextSentencePrediction,
ErnieForPreTraining,
ErnieForQuestionAnswering,
ErnieForSequenceClassification,
ErnieForTokenClassification,
ErnieModel,
ErniePreTrainedModel,
)
else:
import sys
UpperCamelCase__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 75
|
'''simple docstring'''
from typing import List, Optional, Tuple, Union
import torch
from ...utils import logging, randn_tensor
from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline
UpperCamelCase__ = logging.get_logger(__name__) # pylint: disable=invalid-name
class lowerCamelCase_ ( __a ):
def __init__( self : Dict , _A : List[str] , _A : int ):
'''simple docstring'''
super().__init__()
self.register_modules(unet=_A , scheduler=_A )
@torch.no_grad()
def __call__( self : List[Any] , _A : int = 1 , _A : int = 100 , _A : Optional[Union[torch.Generator, List[torch.Generator]]] = None , _A : Optional[float] = None , _A : bool = True , ):
'''simple docstring'''
if audio_length_in_s is None:
UpperCAmelCase__ : List[str] = self.unet.config.sample_size / self.unet.config.sample_rate
UpperCAmelCase__ : Union[str, Any] = audio_length_in_s * self.unet.config.sample_rate
UpperCAmelCase__ : List[Any] = 2 ** len(self.unet.up_blocks )
if sample_size < 3 * down_scale_factor:
raise ValueError(
f"""{audio_length_in_s} is too small. Make sure it's bigger or equal to"""
f""" {3 * down_scale_factor / self.unet.config.sample_rate}.""" )
UpperCAmelCase__ : List[Any] = int(_A )
if sample_size % down_scale_factor != 0:
UpperCAmelCase__ : int = (
(audio_length_in_s * self.unet.config.sample_rate) // down_scale_factor + 1
) * down_scale_factor
logger.info(
f"""{audio_length_in_s} is increased to {sample_size / self.unet.config.sample_rate} so that it can be handled"""
f""" by the model. It will be cut to {original_sample_size / self.unet.config.sample_rate} after the denoising"""
''' process.''' )
UpperCAmelCase__ : Dict = int(_A )
UpperCAmelCase__ : Optional[Any] = next(iter(self.unet.parameters() ) ).dtype
UpperCAmelCase__ : int = (batch_size, self.unet.config.in_channels, sample_size)
if isinstance(_A , _A ) and len(_A ) != batch_size:
raise ValueError(
f"""You have passed a list of generators of length {len(_A )}, but requested an effective batch"""
f""" size of {batch_size}. Make sure the batch size matches the length of the generators.""" )
UpperCAmelCase__ : Optional[int] = randn_tensor(_A , generator=_A , device=self.device , dtype=_A )
# set step values
self.scheduler.set_timesteps(_A , device=audio.device )
UpperCAmelCase__ : List[str] = self.scheduler.timesteps.to(_A )
for t in self.progress_bar(self.scheduler.timesteps ):
# 1. predict noise model_output
UpperCAmelCase__ : Optional[int] = self.unet(_A , _A ).sample
            # 2. compute previous image: x_t -> x_t-1
UpperCAmelCase__ : List[Any] = self.scheduler.step(_A , _A , _A ).prev_sample
UpperCAmelCase__ : Any = audio.clamp(-1 , 1 ).float().cpu().numpy()
UpperCAmelCase__ : Any = audio[:, :, :original_sample_size]
if not return_dict:
return (audio,)
return AudioPipelineOutput(audios=_A )
| 75
| 1
|
import absl # noqa: F401 # Here to have a nice missing dependency error message early on
import nltk # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy # noqa: F401 # Here to have a nice missing dependency error message early on
import six # noqa: F401 # Here to have a nice missing dependency error message early on
from rouge_score import rouge_scorer, scoring
import datasets
__SCREAMING_SNAKE_CASE : Optional[int] ='''\
@inproceedings{lin-2004-rouge,
title = "{ROUGE}: A Package for Automatic Evaluation of Summaries",
author = "Lin, Chin-Yew",
booktitle = "Text Summarization Branches Out",
month = jul,
year = "2004",
address = "Barcelona, Spain",
publisher = "Association for Computational Linguistics",
url = "https://www.aclweb.org/anthology/W04-1013",
pages = "74--81",
}
'''
__SCREAMING_SNAKE_CASE : List[str] ='''\
ROUGE, or Recall-Oriented Understudy for Gisting Evaluation, is a set of metrics and a software package used for
evaluating automatic summarization and machine translation software in natural language processing.
The metrics compare an automatically produced summary or translation against a reference or a set of references (human-produced) summary or translation.
Note that ROUGE is case insensitive, meaning that upper case letters are treated the same way as lower case letters.
This metric is a wrapper around Google Research's reimplementation of ROUGE:
https://github.com/google-research/google-research/tree/master/rouge
'''
__SCREAMING_SNAKE_CASE : Tuple ='''
Calculates average rouge scores for a list of hypotheses and references
Args:
predictions: list of predictions to score. Each prediction
should be a string with tokens separated by spaces.
references: list of reference for each prediction. Each
reference should be a string with tokens separated by spaces.
rouge_types: A list of rouge types to calculate.
Valid names:
`"rouge{n}"` (e.g. `"rouge1"`, `"rouge2"`) where: {n} is the n-gram based scoring,
`"rougeL"`: Longest common subsequence based scoring.
`"rougeLSum"`: rougeLsum splits text using `"\n"`.
See details in https://github.com/huggingface/datasets/issues/617
use_stemmer: Bool indicating whether Porter stemmer should be used to strip word suffixes.
use_aggregator: Return aggregates if this is set to True
Returns:
rouge1: rouge_1 (precision, recall, f1),
rouge2: rouge_2 (precision, recall, f1),
rougeL: rouge_l (precision, recall, f1),
rougeLsum: rouge_lsum (precision, recall, f1)
Examples:
>>> rouge = datasets.load_metric(\'rouge\')
>>> predictions = ["hello there", "general kenobi"]
>>> references = ["hello there", "general kenobi"]
>>> results = rouge.compute(predictions=predictions, references=references)
>>> print(list(results.keys()))
[\'rouge1\', \'rouge2\', \'rougeL\', \'rougeLsum\']
>>> print(results["rouge1"])
AggregateScore(low=Score(precision=1.0, recall=1.0, fmeasure=1.0), mid=Score(precision=1.0, recall=1.0, fmeasure=1.0), high=Score(precision=1.0, recall=1.0, fmeasure=1.0))
>>> print(results["rouge1"].mid.fmeasure)
1.0
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class A_ ( datasets.Metric ):
def SCREAMING_SNAKE_CASE__ ( self : Any ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""string""" , id="""sequence""" ),
"""references""": datasets.Value("""string""" , id="""sequence""" ),
} ) , codebase_urls=["""https://github.com/google-research/google-research/tree/master/rouge"""] , reference_urls=[
"""https://en.wikipedia.org/wiki/ROUGE_(metric)""",
"""https://github.com/google-research/google-research/tree/master/rouge""",
] , )
def SCREAMING_SNAKE_CASE__ ( self : str , snake_case__ : Any , snake_case__ : Optional[int] , snake_case__ : Optional[int]=None , snake_case__ : Tuple=True , snake_case__ : Optional[int]=False ):
if rouge_types is None:
lowercase = ["""rouge1""", """rouge2""", """rougeL""", """rougeLsum"""]
lowercase = rouge_scorer.RougeScorer(rouge_types=snake_case__ , use_stemmer=snake_case__ )
if use_aggregator:
lowercase = scoring.BootstrapAggregator()
else:
lowercase = []
for ref, pred in zip(snake_case__ , snake_case__ ):
lowercase = scorer.score(snake_case__ , snake_case__ )
if use_aggregator:
aggregator.add_scores(snake_case__ )
else:
scores.append(snake_case__ )
if use_aggregator:
lowercase = aggregator.aggregate()
else:
lowercase = {}
for key in scores[0]:
lowercase = [score[key] for score in scores]
return result
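        # Added note: with `use_aggregator=True` the bootstrap aggregator above returns one
        # AggregateScore (low/mid/high confidence bounds) per ROUGE type; with it disabled,
        # each key instead maps to the list of per-example Score tuples collected in the loop.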
| 715
|
import json
import os
import sys
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from huggingface_hub import HfFolder, Repository, create_repo, delete_repo
from requests.exceptions import HTTPError
import transformers
from transformers import (
CONFIG_MAPPING,
FEATURE_EXTRACTOR_MAPPING,
PROCESSOR_MAPPING,
TOKENIZER_MAPPING,
AutoConfig,
AutoFeatureExtractor,
AutoProcessor,
AutoTokenizer,
BertTokenizer,
ProcessorMixin,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
)
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
from transformers.tokenization_utils import TOKENIZER_CONFIG_FILE
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_tokenizers_available
sys.path.append(str(Path(__file__).parent.parent.parent.parent / '''utils'''))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
from test_module.custom_processing import CustomProcessor # noqa E402
from test_module.custom_tokenization import CustomTokenizer # noqa E402
__SCREAMING_SNAKE_CASE : Tuple =get_tests_dir('''fixtures/dummy_feature_extractor_config.json''')
__SCREAMING_SNAKE_CASE : Union[str, Any] =get_tests_dir('''fixtures/vocab.json''')
__SCREAMING_SNAKE_CASE : Union[str, Any] =get_tests_dir('''fixtures''')
class A_ ( unittest.TestCase ):
_A :List[str] = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''[PAD]''', '''[MASK]''', '''bla''', '''blou''']
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ):
lowercase = 0
def SCREAMING_SNAKE_CASE__ ( self : List[Any] ):
lowercase = AutoProcessor.from_pretrained("""facebook/wav2vec2-base-960h""" )
self.assertIsInstance(snake_case__ , snake_case__ )
def SCREAMING_SNAKE_CASE__ ( self : List[Any] ):
with tempfile.TemporaryDirectory() as tmpdirname:
lowercase = WavaVecaConfig()
lowercase = AutoProcessor.from_pretrained("""facebook/wav2vec2-base-960h""" )
# save in new folder
model_config.save_pretrained(snake_case__ )
processor.save_pretrained(snake_case__ )
lowercase = AutoProcessor.from_pretrained(snake_case__ )
self.assertIsInstance(snake_case__ , snake_case__ )
def SCREAMING_SNAKE_CASE__ ( self : Any ):
with tempfile.TemporaryDirectory() as tmpdirname:
# copy relevant files
copyfile(snake_case__ , os.path.join(snake_case__ , snake_case__ ) )
copyfile(snake_case__ , os.path.join(snake_case__ , """vocab.json""" ) )
lowercase = AutoProcessor.from_pretrained(snake_case__ )
self.assertIsInstance(snake_case__ , snake_case__ )
def SCREAMING_SNAKE_CASE__ ( self : int ):
with tempfile.TemporaryDirectory() as tmpdirname:
lowercase = WavaVecaFeatureExtractor()
lowercase = AutoTokenizer.from_pretrained("""facebook/wav2vec2-base-960h""" )
lowercase = WavaVecaProcessor(snake_case__ , snake_case__ )
# save in new folder
processor.save_pretrained(snake_case__ )
# drop `processor_class` in tokenizer
with open(os.path.join(snake_case__ , snake_case__ ) , """r""" ) as f:
lowercase = json.load(snake_case__ )
config_dict.pop("""processor_class""" )
with open(os.path.join(snake_case__ , snake_case__ ) , """w""" ) as f:
f.write(json.dumps(snake_case__ ) )
lowercase = AutoProcessor.from_pretrained(snake_case__ )
self.assertIsInstance(snake_case__ , snake_case__ )
def SCREAMING_SNAKE_CASE__ ( self : Any ):
with tempfile.TemporaryDirectory() as tmpdirname:
lowercase = WavaVecaFeatureExtractor()
lowercase = AutoTokenizer.from_pretrained("""facebook/wav2vec2-base-960h""" )
lowercase = WavaVecaProcessor(snake_case__ , snake_case__ )
# save in new folder
processor.save_pretrained(snake_case__ )
# drop `processor_class` in feature extractor
with open(os.path.join(snake_case__ , snake_case__ ) , """r""" ) as f:
lowercase = json.load(snake_case__ )
config_dict.pop("""processor_class""" )
with open(os.path.join(snake_case__ , snake_case__ ) , """w""" ) as f:
f.write(json.dumps(snake_case__ ) )
lowercase = AutoProcessor.from_pretrained(snake_case__ )
self.assertIsInstance(snake_case__ , snake_case__ )
def SCREAMING_SNAKE_CASE__ ( self : str ):
with tempfile.TemporaryDirectory() as tmpdirname:
lowercase = WavaVecaConfig(processor_class="""Wav2Vec2Processor""" )
model_config.save_pretrained(snake_case__ )
# copy relevant files
copyfile(snake_case__ , os.path.join(snake_case__ , """vocab.json""" ) )
            # create empty sample processor
with open(os.path.join(snake_case__ , snake_case__ ) , """w""" ) as f:
f.write("""{}""" )
lowercase = AutoProcessor.from_pretrained(snake_case__ )
self.assertIsInstance(snake_case__ , snake_case__ )
def SCREAMING_SNAKE_CASE__ ( self : Tuple ):
# If remote code is not set, we will time out when asking whether to load the model.
with self.assertRaises(snake_case__ ):
lowercase = AutoProcessor.from_pretrained("""hf-internal-testing/test_dynamic_processor""" )
# If remote code is disabled, we can't load this config.
with self.assertRaises(snake_case__ ):
lowercase = AutoProcessor.from_pretrained(
"""hf-internal-testing/test_dynamic_processor""" , trust_remote_code=snake_case__ )
lowercase = AutoProcessor.from_pretrained("""hf-internal-testing/test_dynamic_processor""" , trust_remote_code=snake_case__ )
self.assertTrue(processor.special_attribute_present )
self.assertEqual(processor.__class__.__name__ , """NewProcessor""" )
lowercase = processor.feature_extractor
self.assertTrue(feature_extractor.special_attribute_present )
self.assertEqual(feature_extractor.__class__.__name__ , """NewFeatureExtractor""" )
lowercase = processor.tokenizer
self.assertTrue(tokenizer.special_attribute_present )
if is_tokenizers_available():
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizerFast""" )
# Test we can also load the slow version
lowercase = AutoProcessor.from_pretrained(
"""hf-internal-testing/test_dynamic_processor""" , trust_remote_code=snake_case__ , use_fast=snake_case__ )
lowercase = new_processor.tokenizer
self.assertTrue(new_tokenizer.special_attribute_present )
self.assertEqual(new_tokenizer.__class__.__name__ , """NewTokenizer""" )
else:
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizer""" )
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ):
try:
AutoConfig.register("""custom""" , snake_case__ )
AutoFeatureExtractor.register(snake_case__ , snake_case__ )
AutoTokenizer.register(snake_case__ , slow_tokenizer_class=snake_case__ )
AutoProcessor.register(snake_case__ , snake_case__ )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(snake_case__ ):
AutoProcessor.register(snake_case__ , snake_case__ )
# Now that the config is registered, it can be used as any other config with the auto-API
lowercase = CustomFeatureExtractor.from_pretrained(snake_case__ )
with tempfile.TemporaryDirectory() as tmp_dir:
lowercase = os.path.join(snake_case__ , """vocab.txt""" )
with open(snake_case__ , """w""" , encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in self.vocab_tokens] ) )
lowercase = CustomTokenizer(snake_case__ )
lowercase = CustomProcessor(snake_case__ , snake_case__ )
with tempfile.TemporaryDirectory() as tmp_dir:
processor.save_pretrained(snake_case__ )
lowercase = AutoProcessor.from_pretrained(snake_case__ )
self.assertIsInstance(snake_case__ , snake_case__ )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
if CustomConfig in PROCESSOR_MAPPING._extra_content:
del PROCESSOR_MAPPING._extra_content[CustomConfig]
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ):
class A_ ( __a ):
_A :List[str] = False
class A_ ( __a ):
_A :Dict = False
class A_ ( __a ):
_A :Union[str, Any] = '''AutoFeatureExtractor'''
_A :Tuple = '''AutoTokenizer'''
_A :Optional[Any] = False
try:
AutoConfig.register("""custom""" , snake_case__ )
AutoFeatureExtractor.register(snake_case__ , snake_case__ )
AutoTokenizer.register(snake_case__ , slow_tokenizer_class=snake_case__ )
AutoProcessor.register(snake_case__ , snake_case__ )
# If remote code is not set, the default is to use local classes.
lowercase = AutoProcessor.from_pretrained("""hf-internal-testing/test_dynamic_processor""" )
self.assertEqual(processor.__class__.__name__ , """NewProcessor""" )
self.assertFalse(processor.special_attribute_present )
self.assertFalse(processor.feature_extractor.special_attribute_present )
self.assertFalse(processor.tokenizer.special_attribute_present )
# If remote code is disabled, we load the local ones.
lowercase = AutoProcessor.from_pretrained(
"""hf-internal-testing/test_dynamic_processor""" , trust_remote_code=snake_case__ )
self.assertEqual(processor.__class__.__name__ , """NewProcessor""" )
self.assertFalse(processor.special_attribute_present )
self.assertFalse(processor.feature_extractor.special_attribute_present )
self.assertFalse(processor.tokenizer.special_attribute_present )
        # If remote code is enabled, we load from the Hub.
lowercase = AutoProcessor.from_pretrained(
"""hf-internal-testing/test_dynamic_processor""" , trust_remote_code=snake_case__ )
self.assertEqual(processor.__class__.__name__ , """NewProcessor""" )
self.assertTrue(processor.special_attribute_present )
self.assertTrue(processor.feature_extractor.special_attribute_present )
self.assertTrue(processor.tokenizer.special_attribute_present )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
if CustomConfig in PROCESSOR_MAPPING._extra_content:
del PROCESSOR_MAPPING._extra_content[CustomConfig]
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ):
lowercase = AutoProcessor.from_pretrained("""hf-internal-testing/tiny-random-bert""" )
self.assertEqual(processor.__class__.__name__ , """BertTokenizerFast""" )
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ):
lowercase = AutoProcessor.from_pretrained("""hf-internal-testing/tiny-random-convnext""" )
self.assertEqual(processor.__class__.__name__ , """ConvNextImageProcessor""" )
@is_staging_test
class A_ ( unittest.TestCase ):
_A :Optional[int] = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''[PAD]''', '''[MASK]''', '''bla''', '''blou''']
@classmethod
def SCREAMING_SNAKE_CASE__ ( cls : Optional[int] ):
lowercase = TOKEN
HfFolder.save_token(snake_case__ )
@classmethod
def SCREAMING_SNAKE_CASE__ ( cls : Union[str, Any] ):
try:
delete_repo(token=cls._token , repo_id="""test-processor""" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="""valid_org/test-processor-org""" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="""test-dynamic-processor""" )
except HTTPError:
pass
def SCREAMING_SNAKE_CASE__ ( self : List[str] ):
lowercase = WavaVecaProcessor.from_pretrained(snake_case__ )
with tempfile.TemporaryDirectory() as tmp_dir:
processor.save_pretrained(
os.path.join(snake_case__ , """test-processor""" ) , push_to_hub=snake_case__ , use_auth_token=self._token )
lowercase = WavaVecaProcessor.from_pretrained(F"""{USER}/test-processor""" )
for k, v in processor.feature_extractor.__dict__.items():
self.assertEqual(snake_case__ , getattr(new_processor.feature_extractor , snake_case__ ) )
self.assertDictEqual(new_processor.tokenizer.get_vocab() , processor.tokenizer.get_vocab() )
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ):
lowercase = WavaVecaProcessor.from_pretrained(snake_case__ )
with tempfile.TemporaryDirectory() as tmp_dir:
processor.save_pretrained(
os.path.join(snake_case__ , """test-processor-org""" ) , push_to_hub=snake_case__ , use_auth_token=self._token , organization="""valid_org""" , )
lowercase = WavaVecaProcessor.from_pretrained("""valid_org/test-processor-org""" )
for k, v in processor.feature_extractor.__dict__.items():
self.assertEqual(snake_case__ , getattr(new_processor.feature_extractor , snake_case__ ) )
self.assertDictEqual(new_processor.tokenizer.get_vocab() , processor.tokenizer.get_vocab() )
def SCREAMING_SNAKE_CASE__ ( self : Tuple ):
CustomFeatureExtractor.register_for_auto_class()
CustomTokenizer.register_for_auto_class()
CustomProcessor.register_for_auto_class()
lowercase = CustomFeatureExtractor.from_pretrained(snake_case__ )
with tempfile.TemporaryDirectory() as tmp_dir:
lowercase = os.path.join(snake_case__ , """vocab.txt""" )
with open(snake_case__ , """w""" , encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in self.vocab_tokens] ) )
lowercase = CustomTokenizer(snake_case__ )
lowercase = CustomProcessor(snake_case__ , snake_case__ )
with tempfile.TemporaryDirectory() as tmp_dir:
create_repo(F"""{USER}/test-dynamic-processor""" , token=self._token )
lowercase = Repository(snake_case__ , clone_from=F"""{USER}/test-dynamic-processor""" , token=self._token )
processor.save_pretrained(snake_case__ )
# This has added the proper auto_map field to the feature extractor config
self.assertDictEqual(
processor.feature_extractor.auto_map , {
"""AutoFeatureExtractor""": """custom_feature_extraction.CustomFeatureExtractor""",
"""AutoProcessor""": """custom_processing.CustomProcessor""",
} , )
# This has added the proper auto_map field to the tokenizer config
with open(os.path.join(snake_case__ , """tokenizer_config.json""" ) ) as f:
lowercase = json.load(snake_case__ )
self.assertDictEqual(
tokenizer_config["""auto_map"""] , {
"""AutoTokenizer""": ["""custom_tokenization.CustomTokenizer""", None],
"""AutoProcessor""": """custom_processing.CustomProcessor""",
} , )
# The code has been copied from fixtures
self.assertTrue(os.path.isfile(os.path.join(snake_case__ , """custom_feature_extraction.py""" ) ) )
self.assertTrue(os.path.isfile(os.path.join(snake_case__ , """custom_tokenization.py""" ) ) )
self.assertTrue(os.path.isfile(os.path.join(snake_case__ , """custom_processing.py""" ) ) )
repo.push_to_hub()
lowercase = AutoProcessor.from_pretrained(F"""{USER}/test-dynamic-processor""" , trust_remote_code=snake_case__ )
# Can't make an isinstance check because the new_processor is from the CustomProcessor class of a dynamic module
self.assertEqual(new_processor.__class__.__name__ , """CustomProcessor""" )
| 72
| 0
|
"""simple docstring"""
from __future__ import annotations
def __snake_case ( _lowercase ):
"""simple docstring"""
UpperCamelCase = str(_lowercase )
return n == n[::-1]
def __snake_case ( _lowercase = 100_0000 ):
"""simple docstring"""
UpperCamelCase = 0
for i in range(1 ,_lowercase ):
if is_palindrome(_lowercase ) and is_palindrome(bin(_lowercase ).split('''b''' )[1] ):
total += i
return total
if __name__ == "__main__":
print(solution(int(str(input().strip()))))
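# Added worked example: 585 is counted by solution() because it reads the same both ways in
# base 10 and its binary form, 1001001001, is also a palindrome; range(1, n) means the upper
# limit itself is never tested.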
| 34
|
"""simple docstring"""
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a : List[Any] = logging.get_logger(__name__)
a : List[Any] = {
'''microsoft/unispeech-sat-base-100h-libri-ft''': (
'''https://huggingface.co/microsoft/unispeech-sat-base-100h-libri-ft/resolve/main/config.json'''
),
# See all UniSpeechSat models at https://huggingface.co/models?filter=unispeech_sat
}
class __UpperCamelCase ( a__ ):
lowerCamelCase : Tuple ="""unispeech-sat"""
def __init__( self , lowerCAmelCase__=32 , lowerCAmelCase__=768 , lowerCAmelCase__=12 , lowerCAmelCase__=12 , lowerCAmelCase__=3072 , lowerCAmelCase__="gelu" , lowerCAmelCase__=0.1 , lowerCAmelCase__=0.1 , lowerCAmelCase__=0.1 , lowerCAmelCase__=0.0 , lowerCAmelCase__=0.0 , lowerCAmelCase__=0.1 , lowerCAmelCase__=0.1 , lowerCAmelCase__=0.02 , lowerCAmelCase__=1E-5 , lowerCAmelCase__="group" , lowerCAmelCase__="gelu" , lowerCAmelCase__=(512, 512, 512, 512, 512, 512, 512) , lowerCAmelCase__=(5, 2, 2, 2, 2, 2, 2) , lowerCAmelCase__=(10, 3, 3, 3, 3, 2, 2) , lowerCAmelCase__=False , lowerCAmelCase__=128 , lowerCAmelCase__=16 , lowerCAmelCase__=False , lowerCAmelCase__=True , lowerCAmelCase__=0.05 , lowerCAmelCase__=10 , lowerCAmelCase__=2 , lowerCAmelCase__=0.0 , lowerCAmelCase__=10 , lowerCAmelCase__=0 , lowerCAmelCase__=320 , lowerCAmelCase__=2 , lowerCAmelCase__=0.1 , lowerCAmelCase__=100 , lowerCAmelCase__=256 , lowerCAmelCase__=256 , lowerCAmelCase__=0.1 , lowerCAmelCase__="mean" , lowerCAmelCase__=False , lowerCAmelCase__=False , lowerCAmelCase__=256 , lowerCAmelCase__=(512, 512, 512, 512, 1500) , lowerCAmelCase__=(5, 3, 3, 1, 1) , lowerCAmelCase__=(1, 2, 3, 1, 1) , lowerCAmelCase__=512 , lowerCAmelCase__=0 , lowerCAmelCase__=1 , lowerCAmelCase__=2 , lowerCAmelCase__=504 , **lowerCAmelCase__ , ) -> List[str]:
super().__init__(**lowerCAmelCase__ , pad_token_id=lowerCAmelCase__ , bos_token_id=lowerCAmelCase__ , eos_token_id=lowerCAmelCase__ )
a : Any = hidden_size
a : Tuple = feat_extract_norm
a : Tuple = feat_extract_activation
a : Dict = list(lowerCAmelCase__ )
a : int = list(lowerCAmelCase__ )
a : Optional[Any] = list(lowerCAmelCase__ )
a : int = conv_bias
a : str = num_conv_pos_embeddings
a : Dict = num_conv_pos_embedding_groups
a : Optional[int] = len(self.conv_dim )
a : int = num_hidden_layers
a : Any = intermediate_size
a : Any = hidden_act
a : List[Any] = num_attention_heads
a : Any = hidden_dropout
a : Union[str, Any] = attention_dropout
a : Tuple = activation_dropout
a : Dict = feat_proj_dropout
a : Optional[Any] = final_dropout
a : Union[str, Any] = layerdrop
a : str = layer_norm_eps
a : Optional[int] = initializer_range
a : Optional[int] = vocab_size
a : str = num_clusters
a : Any = do_stable_layer_norm
a : str = use_weighted_layer_sum
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
"Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
" `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
f""" {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,"""
f""" `len(config.conv_kernel) = {len(self.conv_kernel )}`.""" )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
a : List[Any] = apply_spec_augment
a : int = mask_time_prob
a : Optional[int] = mask_time_length
a : Dict = mask_time_min_masks
a : Optional[int] = mask_feature_prob
a : List[str] = mask_feature_length
a : Dict = mask_feature_min_masks
# parameters for pretraining with codevector quantized representations
a : List[str] = num_codevectors_per_group
a : List[Any] = num_codevector_groups
a : Tuple = contrastive_logits_temperature
a : int = feat_quantizer_dropout
a : Optional[Any] = num_negatives
a : Optional[int] = codevector_dim
a : Tuple = proj_codevector_dim
a : Optional[int] = diversity_loss_weight
# ctc loss
a : Dict = ctc_loss_reduction
a : Optional[Any] = ctc_zero_infinity
# SequenceClassification-specific parameter. Feel free to ignore for other classes.
a : Dict = classifier_proj_size
# XVector-specific parameters. Feel free to ignore for other classes.
a : Union[str, Any] = list(lowerCAmelCase__ )
a : List[str] = list(lowerCAmelCase__ )
a : Tuple = list(lowerCAmelCase__ )
a : Optional[int] = xvector_output_dim
@property
def __a ( self ) -> Any:
return functools.reduce(operator.mul , self.conv_stride , 1 )
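A hedged usage sketch, assuming the public UniSpeechSatConfig / UniSpeechSatModel names in transformers that this configuration mirrors.

from transformers import UniSpeechSatConfig, UniSpeechSatModel

config = UniSpeechSatConfig(hidden_size=768, num_hidden_layers=12)  # defaults match the signature above
model = UniSpeechSatModel(config)  # randomly initialised weights built from the config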
| 633
| 0
|
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin
@dataclass
class UpperCamelCase_ (__A ):
__magic_name__ = 42
__magic_name__ = 42
__magic_name__ = None
class UpperCamelCase_ (__A , __A ):
__magic_name__ = 2
@register_to_config
def __init__( self : Any , lowerCAmelCase_ : float = 0.0_2 , lowerCAmelCase_ : float = 100 , lowerCAmelCase_ : float = 1.0_0_7 , lowerCAmelCase_ : float = 80 , lowerCAmelCase_ : float = 0.0_5 , lowerCAmelCase_ : float = 50 , ) -> Union[str, Any]:
# standard deviation of the initial noise distribution
UpperCAmelCase_ : Any = sigma_max
# setable values
UpperCAmelCase_ : int = None
UpperCAmelCase_ : np.IntTensor = None
UpperCAmelCase_ : torch.FloatTensor = None # sigma(t_i)
def _SCREAMING_SNAKE_CASE ( self : List[Any] , lowerCAmelCase_ : torch.FloatTensor , lowerCAmelCase_ : Optional[int] = None ) -> torch.FloatTensor:
return sample
def _SCREAMING_SNAKE_CASE ( self : Optional[int] , lowerCAmelCase_ : int , lowerCAmelCase_ : Union[str, torch.device] = None ) -> int:
UpperCAmelCase_ : Union[str, Any] = num_inference_steps
UpperCAmelCase_ : Optional[Any] = np.arange(0 , self.num_inference_steps )[::-1].copy()
UpperCAmelCase_ : List[str] = torch.from_numpy(lowerCAmelCase_ ).to(lowerCAmelCase_ )
UpperCAmelCase_ : List[str] = [
(
self.config.sigma_max**2
* (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1))
)
for i in self.timesteps
]
UpperCAmelCase_ : Optional[Any] = torch.tensor(lowerCAmelCase_ , dtype=torch.floataa , device=lowerCAmelCase_ )
def _SCREAMING_SNAKE_CASE ( self : List[Any] , lowerCAmelCase_ : torch.FloatTensor , lowerCAmelCase_ : float , lowerCAmelCase_ : Optional[torch.Generator] = None ) -> Tuple[torch.FloatTensor, float]:
if self.config.s_min <= sigma <= self.config.s_max:
UpperCAmelCase_ : List[str] = min(self.config.s_churn / self.num_inference_steps , 2**0.5 - 1 )
else:
UpperCAmelCase_ : Optional[int] = 0
# sample eps ~ N(0, S_noise^2 * I)
UpperCAmelCase_ : Tuple = self.config.s_noise * randn_tensor(sample.shape , generator=lowerCAmelCase_ ).to(sample.device )
UpperCAmelCase_ : Optional[Any] = sigma + gamma * sigma
UpperCAmelCase_ : List[str] = sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps)
return sample_hat, sigma_hat
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , lowerCAmelCase_ : torch.FloatTensor , lowerCAmelCase_ : float , lowerCAmelCase_ : float , lowerCAmelCase_ : torch.FloatTensor , lowerCAmelCase_ : bool = True , ) -> Union[KarrasVeOutput, Tuple]:
UpperCAmelCase_ : int = sample_hat + sigma_hat * model_output
UpperCAmelCase_ : Dict = (sample_hat - pred_original_sample) / sigma_hat
UpperCAmelCase_ : Union[str, Any] = sample_hat + (sigma_prev - sigma_hat) * derivative
if not return_dict:
return (sample_prev, derivative)
return KarrasVeOutput(
prev_sample=lowerCAmelCase_ , derivative=lowerCAmelCase_ , pred_original_sample=lowerCAmelCase_ )
def _SCREAMING_SNAKE_CASE ( self : int , lowerCAmelCase_ : torch.FloatTensor , lowerCAmelCase_ : float , lowerCAmelCase_ : float , lowerCAmelCase_ : torch.FloatTensor , lowerCAmelCase_ : torch.FloatTensor , lowerCAmelCase_ : torch.FloatTensor , lowerCAmelCase_ : bool = True , ) -> Union[KarrasVeOutput, Tuple]:
UpperCAmelCase_ : Optional[Any] = sample_prev + sigma_prev * model_output
UpperCAmelCase_ : Dict = (sample_prev - pred_original_sample) / sigma_prev
UpperCAmelCase_ : List[str] = sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr)
if not return_dict:
return (sample_prev, derivative)
return KarrasVeOutput(
prev_sample=lowerCAmelCase_ , derivative=lowerCAmelCase_ , pred_original_sample=lowerCAmelCase_ )
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : List[str] ) -> Tuple:
raise NotImplementedError()
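A minimal sketch of driving this scheduler, assuming the public diffusers KarrasVeScheduler API that the class above mirrors; the denoising model itself is omitted.

import torch
from diffusers import KarrasVeScheduler

scheduler = KarrasVeScheduler(sigma_min=0.02, sigma_max=100.0)
scheduler.set_timesteps(num_inference_steps=50)
sample = torch.randn(1, 3, 64, 64) * scheduler.init_noise_sigma  # init_noise_sigma equals sigma_max
# Each iteration would then call add_noise_to_input(), step() and optionally step_correct()
# with a model's noise prediction, exactly as defined above.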
| 704
|
"""simple docstring"""
import math
from typing import Callable, List, Optional, Union
import numpy as np
import PIL
import torch
from PIL import Image
from transformers import CLIPTextModel, CLIPTokenizer
from diffusers.models import AutoencoderKL, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_upscale import StableDiffusionUpscalePipeline
from diffusers.schedulers import DDIMScheduler, DDPMScheduler, LMSDiscreteScheduler, PNDMScheduler
def snake_case ( A__ ,A__ ,A__=[] ):
UpperCAmelCase_ : int = size[0] - overlap_pixels * 2
UpperCAmelCase_ : Dict = size[1] - overlap_pixels * 2
for letter in ["l", "r"]:
if letter in remove_borders:
size_x += overlap_pixels
for letter in ["t", "b"]:
if letter in remove_borders:
size_y += overlap_pixels
UpperCAmelCase_ : Union[str, Any] = np.ones((size_y, size_x) ,dtype=np.uinta ) * 2_55
UpperCAmelCase_ : Optional[int] = np.pad(A__ ,mode="linear_ramp" ,pad_width=A__ ,end_values=0 )
if "l" in remove_borders:
UpperCAmelCase_ : List[str] = mask[:, overlap_pixels : mask.shape[1]]
if "r" in remove_borders:
UpperCAmelCase_ : Any = mask[:, 0 : mask.shape[1] - overlap_pixels]
if "t" in remove_borders:
UpperCAmelCase_ : Optional[int] = mask[overlap_pixels : mask.shape[0], :]
if "b" in remove_borders:
UpperCAmelCase_ : Any = mask[0 : mask.shape[0] - overlap_pixels, :]
return mask
def snake_case ( A__ ,A__ ,A__ ):
return max(A__ ,min(A__ ,A__ ) )
def snake_case ( A__ ,A__ ,A__ ):
return (
clamp(rect[0] ,min[0] ,max[0] ),
clamp(rect[1] ,min[1] ,max[1] ),
clamp(rect[2] ,min[0] ,max[0] ),
clamp(rect[3] ,min[1] ,max[1] ),
)
def snake_case ( A__ ,A__ ,A__ ):
UpperCAmelCase_ : List[str] = list(A__ )
rect[0] -= overlap
rect[1] -= overlap
rect[2] += overlap
rect[3] += overlap
UpperCAmelCase_ : Optional[Any] = clamp_rect(A__ ,[0, 0] ,[image_size[0], image_size[1]] )
return rect
def snake_case ( A__ ,A__ ,A__ ,A__ ):
UpperCAmelCase_ : Dict = Image.new("RGB" ,(tile.size[0] + original_slice, tile.size[1]) )
result.paste(
original_image.resize((tile.size[0], tile.size[1]) ,Image.BICUBIC ).crop(
(slice_x, 0, slice_x + original_slice, tile.size[1]) ) ,(0, 0) ,)
result.paste(A__ ,(original_slice, 0) )
return result
def snake_case ( A__ ,A__ ):
UpperCAmelCase_ : Optional[int] = (original_image_slice * 4, 0, tile.size[0], tile.size[1])
UpperCAmelCase_ : Optional[int] = tile.crop(A__ )
return tile
def snake_case ( A__ ,A__ ):
UpperCAmelCase_ : List[str] = n % d
return n - divisor
class UpperCamelCase_ (__A ):
def __init__( self : List[str] , lowerCAmelCase_ : AutoencoderKL , lowerCAmelCase_ : CLIPTextModel , lowerCAmelCase_ : CLIPTokenizer , lowerCAmelCase_ : UNetaDConditionModel , lowerCAmelCase_ : DDPMScheduler , lowerCAmelCase_ : Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler] , lowerCAmelCase_ : int = 350 , ) -> Optional[Any]:
super().__init__(
vae=lowerCAmelCase_ , text_encoder=lowerCAmelCase_ , tokenizer=lowerCAmelCase_ , unet=lowerCAmelCase_ , low_res_scheduler=lowerCAmelCase_ , scheduler=lowerCAmelCase_ , max_noise_level=lowerCAmelCase_ , )
def _SCREAMING_SNAKE_CASE ( self : str , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : Any , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : Any , lowerCAmelCase_ : int , lowerCAmelCase_ : int , lowerCAmelCase_ : Any , **lowerCAmelCase_ : List[str] ) -> Dict:
torch.manual_seed(0 )
UpperCAmelCase_ : Optional[Any] = (
min(image.size[0] - (tile_size + original_image_slice) , x * tile_size ),
min(image.size[1] - (tile_size + original_image_slice) , y * tile_size ),
min(image.size[0] , (x + 1) * tile_size ),
min(image.size[1] , (y + 1) * tile_size ),
)
UpperCAmelCase_ : Dict = add_overlap_rect(lowerCAmelCase_ , lowerCAmelCase_ , image.size )
UpperCAmelCase_ : Tuple = image.crop(lowerCAmelCase_ )
UpperCAmelCase_ : Any = ((crop_rect[0] + ((crop_rect[2] - crop_rect[0]) / 2)) / image.size[0]) * tile.size[0]
UpperCAmelCase_ : int = translated_slice_x - (original_image_slice / 2)
UpperCAmelCase_ : Any = max(0 , lowerCAmelCase_ )
UpperCAmelCase_ : Optional[Any] = squeeze_tile(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
UpperCAmelCase_ : Any = to_input.size
UpperCAmelCase_ : Optional[int] = to_input.resize((tile_size, tile_size) , Image.BICUBIC )
UpperCAmelCase_ : Optional[Any] = super(lowerCAmelCase_ , self ).__call__(image=lowerCAmelCase_ , **lowerCAmelCase_ ).images[0]
UpperCAmelCase_ : Union[str, Any] = upscaled_tile.resize((orig_input_size[0] * 4, orig_input_size[1] * 4) , Image.BICUBIC )
UpperCAmelCase_ : List[Any] = unsqueeze_tile(lowerCAmelCase_ , lowerCAmelCase_ )
UpperCAmelCase_ : List[str] = upscaled_tile.resize((tile.size[0] * 4, tile.size[1] * 4) , Image.BICUBIC )
UpperCAmelCase_ : List[Any] = []
if x == 0:
remove_borders.append("l" )
elif crop_rect[2] == image.size[0]:
remove_borders.append("r" )
if y == 0:
remove_borders.append("t" )
elif crop_rect[3] == image.size[1]:
remove_borders.append("b" )
UpperCAmelCase_ : str = Image.fromarray(
make_transparency_mask(
(upscaled_tile.size[0], upscaled_tile.size[1]) , tile_border * 4 , remove_borders=lowerCAmelCase_ ) , mode="L" , )
final_image.paste(
lowerCAmelCase_ , (crop_rect_with_overlap[0] * 4, crop_rect_with_overlap[1] * 4) , lowerCAmelCase_ )
@torch.no_grad()
def __call__( self : int , lowerCAmelCase_ : Union[str, List[str]] , lowerCAmelCase_ : Union[PIL.Image.Image, List[PIL.Image.Image]] , lowerCAmelCase_ : int = 75 , lowerCAmelCase_ : float = 9.0 , lowerCAmelCase_ : int = 50 , lowerCAmelCase_ : Optional[Union[str, List[str]]] = None , lowerCAmelCase_ : Optional[int] = 1 , lowerCAmelCase_ : float = 0.0 , lowerCAmelCase_ : Optional[torch.Generator] = None , lowerCAmelCase_ : Optional[torch.FloatTensor] = None , lowerCAmelCase_ : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , lowerCAmelCase_ : int = 1 , lowerCAmelCase_ : int = 128 , lowerCAmelCase_ : int = 32 , lowerCAmelCase_ : int = 32 , ) -> int:
UpperCAmelCase_ : List[Any] = Image.new("RGB" , (image.size[0] * 4, image.size[1] * 4) )
UpperCAmelCase_ : Any = math.ceil(image.size[0] / tile_size )
UpperCAmelCase_ : str = math.ceil(image.size[1] / tile_size )
UpperCAmelCase_ : Any = tcx * tcy
UpperCAmelCase_ : List[Any] = 0
for y in range(lowerCAmelCase_ ):
for x in range(lowerCAmelCase_ ):
self._process_tile(
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , prompt=lowerCAmelCase_ , num_inference_steps=lowerCAmelCase_ , guidance_scale=lowerCAmelCase_ , noise_level=lowerCAmelCase_ , negative_prompt=lowerCAmelCase_ , num_images_per_prompt=lowerCAmelCase_ , eta=lowerCAmelCase_ , generator=lowerCAmelCase_ , latents=lowerCAmelCase_ , )
current_count += 1
if callback is not None:
callback({"progress": current_count / total_tile_count, "image": final_image} )
return final_image
def snake_case ( ):
# Run a demo
UpperCAmelCase_ : List[str] = "stabilityai/stable-diffusion-x4-upscaler"
UpperCAmelCase_ : Optional[Any] = StableDiffusionTiledUpscalePipeline.from_pretrained(A__ ,revision="fp16" ,torch_dtype=torch.floataa )
UpperCAmelCase_ : Optional[int] = pipe.to("cuda" )
UpperCAmelCase_ : Tuple = Image.open("../../docs/source/imgs/diffusers_library.jpg" )
def callback(A__ ):
print(F"""progress: {obj['progress']:.4f}""" )
obj["image"].save("diffusers_library_progress.jpg" )
UpperCAmelCase_ : Union[str, Any] = pipe(image=A__ ,prompt="Black font, white background, vector" ,noise_level=40 ,callback=A__ )
final_image.save("diffusers_library.jpg" )
if __name__ == "__main__":
main()
| 463
| 0
|
'''simple docstring'''
# DISCLAIMER: This file is strongly influenced by https://github.com/ermongroup/ddim
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import flax
import jax
import jax.numpy as jnp
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils_flax import (
CommonSchedulerState,
FlaxKarrasDiffusionSchedulers,
FlaxSchedulerMixin,
FlaxSchedulerOutput,
add_noise_common,
get_velocity_common,
)
@flax.struct.dataclass
class UpperCamelCase__ :
'''simple docstring'''
_UpperCamelCase = 42
# setable values
_UpperCamelCase = 42
_UpperCamelCase = 42
_UpperCamelCase = None
@classmethod
def UpperCamelCase_ ( cls ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ):
return cls(common=_lowerCAmelCase ,init_noise_sigma=_lowerCAmelCase ,timesteps=_lowerCAmelCase )
@dataclass
class UpperCamelCase__ (a ):
'''simple docstring'''
_UpperCamelCase = 42
class UpperCamelCase__ (a ,a ):
'''simple docstring'''
_UpperCamelCase = [e.name for e in FlaxKarrasDiffusionSchedulers]
_UpperCamelCase = 42
@property
def UpperCamelCase_ ( self ):
return True
@register_to_config
def __init__( self ,_lowerCAmelCase = 10_00 ,_lowerCAmelCase = 0.0001 ,_lowerCAmelCase = 0.02 ,_lowerCAmelCase = "linear" ,_lowerCAmelCase = None ,_lowerCAmelCase = "fixed_small" ,_lowerCAmelCase = True ,_lowerCAmelCase = "epsilon" ,_lowerCAmelCase = jnp.floataa ,):
lowerCamelCase__ = dtype
def UpperCamelCase_ ( self ,_lowerCAmelCase = None ):
if common is None:
lowerCamelCase__ = CommonSchedulerState.create(self )
# standard deviation of the initial noise distribution
lowerCamelCase__ = jnp.array(1.0 ,dtype=self.dtype )
lowerCamelCase__ = jnp.arange(0 ,self.config.num_train_timesteps ).round()[::-1]
return DDPMSchedulerState.create(
common=_lowerCAmelCase ,init_noise_sigma=_lowerCAmelCase ,timesteps=_lowerCAmelCase ,)
def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase = None ):
return sample
def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase = () ):
lowerCamelCase__ = self.config.num_train_timesteps // num_inference_steps
# creates integer timesteps by multiplying by ratio
# rounding to avoid issues when num_inference_step is power of 3
lowerCamelCase__ = (jnp.arange(0 ,_lowerCAmelCase ) * step_ratio).round()[::-1]
return state.replace(
num_inference_steps=_lowerCAmelCase ,timesteps=_lowerCAmelCase ,)
def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase=None ,_lowerCAmelCase=None ):
lowerCamelCase__ = state.common.alphas_cumprod[t]
lowerCamelCase__ = jnp.where(t > 0 ,state.common.alphas_cumprod[t - 1] ,jnp.array(1.0 ,dtype=self.dtype ) )
# For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf)
# and sample from it to get previous sample
# x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample
lowerCamelCase__ = (1 - alpha_prod_t_prev) / (1 - alpha_prod_t) * state.common.betas[t]
if variance_type is None:
lowerCamelCase__ = self.config.variance_type
# hacks - were probably added for training stability
if variance_type == "fixed_small":
lowerCamelCase__ = jnp.clip(_lowerCAmelCase ,a_min=1E-20 )
# for rl-diffuser https://arxiv.org/abs/2205.09991
elif variance_type == "fixed_small_log":
lowerCamelCase__ = jnp.log(jnp.clip(_lowerCAmelCase ,a_min=1E-20 ) )
elif variance_type == "fixed_large":
lowerCamelCase__ = state.common.betas[t]
elif variance_type == "fixed_large_log":
# Glide max_log
lowerCamelCase__ = jnp.log(state.common.betas[t] )
elif variance_type == "learned":
return predicted_variance
elif variance_type == "learned_range":
lowerCamelCase__ = variance
lowerCamelCase__ = state.common.betas[t]
lowerCamelCase__ = (predicted_variance + 1) / 2
lowerCamelCase__ = frac * max_log + (1 - frac) * min_log
return variance
def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase = None ,_lowerCAmelCase = True ,):
lowerCamelCase__ = timestep
if key is None:
lowerCamelCase__ = jax.random.PRNGKey(0 )
if model_output.shape[1] == sample.shape[1] * 2 and self.config.variance_type in ["learned", "learned_range"]:
lowerCamelCase__ , lowerCamelCase__ = jnp.split(_lowerCAmelCase ,sample.shape[1] ,axis=1 )
else:
lowerCamelCase__ = None
# 1. compute alphas, betas
lowerCamelCase__ = state.common.alphas_cumprod[t]
lowerCamelCase__ = jnp.where(t > 0 ,state.common.alphas_cumprod[t - 1] ,jnp.array(1.0 ,dtype=self.dtype ) )
lowerCamelCase__ = 1 - alpha_prod_t
lowerCamelCase__ = 1 - alpha_prod_t_prev
# 2. compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
if self.config.prediction_type == "epsilon":
lowerCamelCase__ = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
elif self.config.prediction_type == "sample":
lowerCamelCase__ = model_output
elif self.config.prediction_type == "v_prediction":
lowerCamelCase__ = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output
else:
raise ValueError(
F'''prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample` '''
""" for the FlaxDDPMScheduler.""" )
# 3. Clip "predicted x_0"
if self.config.clip_sample:
lowerCamelCase__ = jnp.clip(_lowerCAmelCase ,-1 ,1 )
# 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
lowerCamelCase__ = (alpha_prod_t_prev ** 0.5 * state.common.betas[t]) / beta_prod_t
lowerCamelCase__ = state.common.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t
# 5. Compute predicted previous sample µ_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
lowerCamelCase__ = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample
# 6. Add noise
def random_variance():
lowerCamelCase__ = jax.random.split(_lowerCAmelCase ,num=1 )
lowerCamelCase__ = jax.random.normal(_lowerCAmelCase ,shape=model_output.shape ,dtype=self.dtype )
return (self._get_variance(_lowerCAmelCase ,_lowerCAmelCase ,predicted_variance=_lowerCAmelCase ) ** 0.5) * noise
lowerCamelCase__ = jnp.where(t > 0 ,random_variance() ,jnp.zeros(model_output.shape ,dtype=self.dtype ) )
lowerCamelCase__ = pred_prev_sample + variance
if not return_dict:
return (pred_prev_sample, state)
return FlaxDDPMSchedulerOutput(prev_sample=_lowerCAmelCase ,state=_lowerCAmelCase )
def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,):
return add_noise_common(state.common ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase )
def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,):
return get_velocity_common(state.common ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase )
def __len__( self ):
return self.config.num_train_timesteps
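A hedged sketch of the state-passing pattern this Flax scheduler uses, assuming the public FlaxDDPMScheduler name; the model call stays a commented placeholder.

import jax
from diffusers import FlaxDDPMScheduler

scheduler = FlaxDDPMScheduler(num_train_timesteps=1000)
state = scheduler.create_state()
state = scheduler.set_timesteps(state, num_inference_steps=50)
sample = jax.random.normal(jax.random.PRNGKey(0), (1, 3, 32, 32)) * state.init_noise_sigma
# for t in state.timesteps:
#     model_output = unet(sample, t)                    # placeholder model
#     out = scheduler.step(state, model_output, t, sample)
#     sample, state = out.prev_sample, out.state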
| 50
|
'''simple docstring'''
import dataclasses
import json
import warnings
from dataclasses import dataclass, field
from time import time
from typing import List
from ..utils import logging
a = logging.get_logger(__name__)
def __magic_name__ ( __UpperCAmelCase=None , __UpperCAmelCase=None ) -> Dict:
'''simple docstring'''
return field(default_factory=lambda: default , metadata=__UpperCAmelCase )
@dataclass
class __a :
__UpperCamelCase : List[str] = list_field(
default=[], metadata={
'help': (
'Model checkpoints to be provided to the AutoModel classes. Leave blank to benchmark the base version'
' of all available models'
)
}, )
__UpperCamelCase : List[int] = list_field(
default=[8], metadata={'help': 'List of batch sizes for which memory and time performance will be evaluated'} )
__UpperCamelCase : List[int] = list_field(
default=[8, 32, 128, 512], metadata={'help': 'List of sequence lengths for which memory and time performance will be evaluated'}, )
__UpperCamelCase : bool = field(
default=_snake_case, metadata={'help': 'Whether to benchmark inference of model. Inference can be disabled via --no-inference.'}, )
__UpperCamelCase : bool = field(
default=_snake_case, metadata={'help': 'Whether to run on available cuda devices. Cuda can be disabled via --no-cuda.'}, )
__UpperCamelCase : bool = field(
default=_snake_case, metadata={'help': 'Whether to run on available tpu devices. TPU can be disabled via --no-tpu.'} )
__UpperCamelCase : bool = field(default=_snake_case, metadata={'help': 'Use FP16 to accelerate inference.'} )
__UpperCamelCase : bool = field(default=_snake_case, metadata={'help': 'Benchmark training of model'} )
__UpperCamelCase : bool = field(default=_snake_case, metadata={'help': 'Verbose memory tracing'} )
__UpperCamelCase : bool = field(
default=_snake_case, metadata={'help': 'Whether to perform speed measurements. Speed measurements can be disabled via --no-speed.'}, )
__UpperCamelCase : bool = field(
default=_snake_case, metadata={
'help': 'Whether to perform memory measurements. Memory measurements can be disabled via --no-memory'
}, )
__UpperCamelCase : bool = field(default=_snake_case, metadata={'help': 'Trace memory line by line'} )
__UpperCamelCase : bool = field(default=_snake_case, metadata={'help': 'Save result to a CSV file'} )
__UpperCamelCase : bool = field(default=_snake_case, metadata={'help': 'Save all print statements in a log file'} )
__UpperCamelCase : bool = field(default=_snake_case, metadata={'help': 'Whether to print environment information'} )
__UpperCamelCase : bool = field(
default=_snake_case, metadata={
'help': (
'Whether to use multiprocessing for memory and speed measurement. It is highly recommended to use'
' multiprocessing for accurate CPU and GPU memory measurements. This option should only be disabled'
' for debugging / testing and on TPU.'
)
}, )
__UpperCamelCase : str = field(
default=F'inference_time_{round(time() )}.csv', metadata={'help': 'CSV filename used if saving time results to csv.'}, )
__UpperCamelCase : str = field(
default=F'inference_memory_{round(time() )}.csv', metadata={'help': 'CSV filename used if saving memory results to csv.'}, )
__UpperCamelCase : str = field(
default=F'train_time_{round(time() )}.csv', metadata={'help': 'CSV filename used if saving time results to csv for training.'}, )
__UpperCamelCase : str = field(
default=F'train_memory_{round(time() )}.csv', metadata={'help': 'CSV filename used if saving memory results to csv for training.'}, )
__UpperCamelCase : str = field(
default=F'env_info_{round(time() )}.csv', metadata={'help': 'CSV filename used if saving environment information.'}, )
__UpperCamelCase : str = field(
default=F'log_{round(time() )}.csv', metadata={'help': 'Log filename used if print statements are saved in log.'}, )
__UpperCamelCase : int = field(default=3, metadata={'help': 'Times an experiment will be run.'} )
__UpperCamelCase : bool = field(
default=_snake_case, metadata={
'help': (
'Instead of loading the model as defined in `config.architectures` if exists, just load the pretrain'
' model weights.'
)
}, )
def UpperCAmelCase__ ( self : List[Any] ):
'''simple docstring'''
warnings.warn(
f"""The class {self.__class__} is deprecated. Hugging Face Benchmarking utils"""
""" are deprecated in general and it is advised to use external Benchmarking libraries """
""" to benchmark Transformer models.""" ,lowerCamelCase ,)
def UpperCAmelCase__ ( self : int ):
'''simple docstring'''
return json.dumps(dataclasses.asdict(self ) ,indent=2 )
@property
def UpperCAmelCase__ ( self : str ):
'''simple docstring'''
if len(self.models ) <= 0:
raise ValueError(
"""Please make sure you provide at least one model name / model identifier, *e.g.* `--models"""
""" bert-base-cased` or `args.models = ['bert-base-cased'].""" )
return self.models
@property
def UpperCAmelCase__ ( self : Tuple ):
'''simple docstring'''
if not self.multi_process:
return False
elif self.is_tpu:
logger.info("""Multiprocessing is currently not possible on TPU.""" )
return False
else:
return True
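A hedged usage sketch with the public PyTorchBenchmarkArguments / PyTorchBenchmark classes these fields feed into; as the warning above notes, the whole utility is deprecated.

from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments

args = PyTorchBenchmarkArguments(
    models=["bert-base-cased"], batch_sizes=[8], sequence_lengths=[32, 128]
)
results = PyTorchBenchmark(args).run()  # emits the deprecation warning defined above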
| 109
| 0
|
"""simple docstring"""
import torch
from accelerate import PartialState
from accelerate.utils.operations import broadcast, gather, gather_object, pad_across_processes, reduce
def UpperCamelCase ( SCREAMING_SNAKE_CASE_ ) ->int:
return (torch.arange(state.num_processes ) + 1.0 + (state.num_processes * state.process_index)).to(state.device )
def UpperCamelCase ( SCREAMING_SNAKE_CASE_ ) ->Optional[int]:
_lowerCamelCase : List[str] = create_tensor(SCREAMING_SNAKE_CASE_ )
_lowerCamelCase : Optional[Any] = gather(SCREAMING_SNAKE_CASE_ )
assert gathered_tensor.tolist() == list(range(1 , state.num_processes**2 + 1 ) )
def UpperCamelCase ( SCREAMING_SNAKE_CASE_ ) ->Tuple:
_lowerCamelCase : Union[str, Any] = [state.process_index]
_lowerCamelCase : Optional[int] = gather_object(SCREAMING_SNAKE_CASE_ )
assert len(SCREAMING_SNAKE_CASE_ ) == state.num_processes, F'''{gathered_obj}, {len(SCREAMING_SNAKE_CASE_ )} != {state.num_processes}'''
assert gathered_obj == list(range(state.num_processes ) ), F'''{gathered_obj} != {list(range(state.num_processes ) )}'''
def UpperCamelCase ( SCREAMING_SNAKE_CASE_ ) ->int:
_lowerCamelCase : List[str] = create_tensor(SCREAMING_SNAKE_CASE_ )
_lowerCamelCase : Tuple = broadcast(SCREAMING_SNAKE_CASE_ )
assert broadcasted_tensor.shape == torch.Size([state.num_processes] )
assert broadcasted_tensor.tolist() == list(range(1 , state.num_processes + 1 ) )
def UpperCamelCase ( SCREAMING_SNAKE_CASE_ ) ->Optional[int]:
# We need to pad the tensor with one more element if we are the main process
# to ensure that we can pad
if state.is_main_process:
_lowerCamelCase : Optional[int] = torch.arange(state.num_processes + 1 ).to(state.device )
else:
_lowerCamelCase : Dict = torch.arange(state.num_processes ).to(state.device )
_lowerCamelCase : Dict = pad_across_processes(SCREAMING_SNAKE_CASE_ )
assert padded_tensor.shape == torch.Size([state.num_processes + 1] )
if not state.is_main_process:
assert padded_tensor.tolist() == list(range(0 , state.num_processes ) ) + [0]
def UpperCamelCase ( SCREAMING_SNAKE_CASE_ ) ->List[str]:
# For now runs on only two processes
if state.num_processes != 2:
return
_lowerCamelCase : str = create_tensor(SCREAMING_SNAKE_CASE_ )
_lowerCamelCase : Optional[int] = reduce(SCREAMING_SNAKE_CASE_ , '''sum''' )
_lowerCamelCase : List[str] = torch.tensor([4.0, 6] ).to(state.device )
assert torch.allclose(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ), F'''{reduced_tensor} != {truth_tensor}'''
def UpperCamelCase ( SCREAMING_SNAKE_CASE_ ) ->List[str]:
# For now runs on only two processes
if state.num_processes != 2:
return
_lowerCamelCase : str = create_tensor(SCREAMING_SNAKE_CASE_ )
_lowerCamelCase : Union[str, Any] = reduce(SCREAMING_SNAKE_CASE_ , '''mean''' )
_lowerCamelCase : Union[str, Any] = torch.tensor([2.0, 3] ).to(state.device )
assert torch.allclose(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ), F'''{reduced_tensor} != {truth_tensor}'''
def UpperCamelCase ( SCREAMING_SNAKE_CASE_ ) ->Any:
# For xla_spawn (TPUs)
main()
def UpperCamelCase ( ) ->Any:
_lowerCamelCase : Any = PartialState()
state.print(F'''State: {state}''' )
state.print('''testing gather''' )
test_gather(SCREAMING_SNAKE_CASE_ )
state.print('''testing gather_object''' )
test_gather_object(SCREAMING_SNAKE_CASE_ )
state.print('''testing broadcast''' )
test_broadcast(SCREAMING_SNAKE_CASE_ )
state.print('''testing pad_across_processes''' )
test_pad_across_processes(SCREAMING_SNAKE_CASE_ )
state.print('''testing reduce_sum''' )
test_reduce_sum(SCREAMING_SNAKE_CASE_ )
state.print('''testing reduce_mean''' )
test_reduce_mean(SCREAMING_SNAKE_CASE_ )
if __name__ == "__main__":
main()
| 701
|
"""simple docstring"""
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version('>=', '4.25.0')):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline
else:
from .pipeline_unclip import UnCLIPPipeline
from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline
from .text_proj import UnCLIPTextProjModel
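A hedged load sketch for the exported pipeline; the checkpoint id and prompt are illustrative.

import torch
from diffusers import UnCLIPPipeline

pipe = UnCLIPPipeline.from_pretrained("kakaobrain/karlo-v1-alpha", torch_dtype=torch.float16).to("cuda")
image = pipe("a photo of a red panda").images[0]  # prior + decoder + super-resolution stack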
| 558
| 0
|
'''simple docstring'''
from math import isqrt
def _snake_case ( A ) -> list[int]:
lowerCAmelCase__ = [True] * max_number
for i in range(2 , isqrt(max_number - 1 ) + 1 ):
if is_prime[i]:
for j in range(i**2 , A , A ):
lowerCAmelCase__ = False
return [i for i in range(2 , A ) if is_prime[i]]
def _snake_case ( A = 10**8 ) -> int:
lowerCAmelCase__ = calculate_prime_numbers(max_number // 2 )
lowerCAmelCase__ = 0
lowerCAmelCase__ = 0
lowerCAmelCase__ = len(A ) - 1
while left <= right:
while prime_numbers[left] * prime_numbers[right] >= max_number:
right -= 1
semiprimes_count += right - left + 1
left += 1
return semiprimes_count
if __name__ == "__main__":
print(f"""{solution() = }""")
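A small worked check of the same two-pointer counting rule with the bound lowered to 30; the ten semiprimes below 30 are 4, 6, 9, 10, 14, 15, 21, 22, 25 and 26.

primes = [2, 3, 5, 7, 11, 13]  # primes below 30 // 2
count, left, right = 0, 0, len(primes) - 1
while left <= right:
    while primes[left] * primes[right] >= 30:
        right -= 1
    count += right - left + 1  # primes[left] pairs with every prime from index left to right
    left += 1
print(count)  # 10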
| 90
|
class UpperCamelCase__ :
'''simple docstring'''
def __init__( self , UpperCamelCase__ ) -> Union[str, Any]:
lowerCamelCase : str = n
lowerCamelCase : str = [None] * self.n
lowerCamelCase : Union[str, Any] = 0 # index of the first element
lowerCamelCase : Optional[Any] = 0
lowerCamelCase : Any = 0
def __len__( self ) -> int:
return self.size
def _lowercase ( self ) -> bool:
return self.size == 0
def _lowercase ( self ) -> Any:
return False if self.is_empty() else self.array[self.front]
def _lowercase ( self , UpperCamelCase__ ) -> List[Any]:
if self.size >= self.n:
raise Exception("QUEUE IS FULL" )
lowerCamelCase : Any = data
lowerCamelCase : int = (self.rear + 1) % self.n
self.size += 1
return self
def _lowercase ( self ) -> int:
if self.size == 0:
raise Exception("UNDERFLOW" )
lowerCamelCase : Any = self.array[self.front]
lowerCamelCase : List[Any] = None
lowerCamelCase : Optional[int] = (self.front + 1) % self.n
self.size -= 1
return temp
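A compact standalone illustration of the modulo index bookkeeping the class above relies on: enqueue three items into a capacity-4 ring, then dequeue one.

n, front, rear, size = 4, 0, 0, 0
array = [None] * n
for item in ("a", "b", "c"):                                    # enqueue
    array[rear], rear, size = item, (rear + 1) % n, size + 1
first, front, size = array[front], (front + 1) % n, size - 1   # dequeue
print(first, size)  # a 2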
| 311
| 0
|
'''simple docstring'''
def snake_case__ ( _A: int , _A: bool = False ) -> bool:
'''simple docstring'''
if n == 2:
return True
if not n % 2 or n < 2:
return False
if n > 5 and n % 10 not in (1, 3, 7, 9): # can quickly check last digit
return False
if n > 3317044064679887385961981 and not allow_probable:
raise ValueError(
"""Warning: upper bound of deterministic test is exceeded. """
"""Pass allow_probable=True to allow probabilistic test. """
"""A return value of True indicates a probable prime.""" )
# array bounds provided by analysis
lowerCAmelCase = [
2047,
1373653,
25326001,
3215031751,
2152302898747,
3474749660383,
341550071728321,
1,
3825123056546413051,
1,
1,
318665857834031151167461,
3317044064679887385961981,
]
lowerCAmelCase = [2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41]
for idx, _p in enumerate(_A , 1 ):
if n < _p:
# then we have our last prime to check
lowerCAmelCase = primes[:idx]
break
lowerCAmelCase , lowerCAmelCase = n - 1, 0
# break up n -1 into a power of 2 (s) and
# remaining odd component
# essentially, solve for d * 2 ** s == n - 1
while d % 2 == 0:
d //= 2
s += 1
for prime in plist:
lowerCAmelCase = False
for r in range(_A ):
lowerCAmelCase = pow(_A , d * 2**r , _A )
# see article for analysis explanation for m
if (r == 0 and m == 1) or ((m + 1) % n == 0):
lowerCAmelCase = True
# this loop will not determine compositeness
break
if pr:
continue
# if pr is False, then the above loop never evaluated to true,
# and the n MUST be composite
return False
return True
def snake_case__ ( ) -> None:
'''simple docstring'''
assert not miller_rabin(561 )
assert miller_rabin(563 )
# 2047
assert not miller_rabin(838201 )
assert miller_rabin(838207 )
# 1_373_653
assert not miller_rabin(17316001 )
assert miller_rabin(17316017 )
# 25_326_001
assert not miller_rabin(3078386641 )
assert miller_rabin(3078386653 )
# 3_215_031_751
assert not miller_rabin(1713045574801 )
assert miller_rabin(1713045574819 )
# 2_152_302_898_747
assert not miller_rabin(2779799728307 )
assert miller_rabin(2779799728327 )
# 3_474_749_660_383
assert not miller_rabin(113850023909441 )
assert miller_rabin(113850023909527 )
# 341_550_071_728_321
assert not miller_rabin(1275041018848804351 )
assert miller_rabin(1275041018848804391 )
# 3_825_123_056_546_413_051
assert not miller_rabin(79666464458507787791867 )
assert miller_rabin(79666464458507787791951 )
# 318_665_857_834_031_151_167_461
assert not miller_rabin(552840677446647897660333 )
assert miller_rabin(552840677446647897660359 )
# 3_317_044_064_679_887_385_961_981
# upper limit for probabilistic test
if __name__ == "__main__":
test_miller_rabin()
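A tiny standalone check of the decomposition step n - 1 = d * 2**s on 561, the Carmichael number rejected in the first assert above.

n = 561
d, s = n - 1, 0
while d % 2 == 0:
    d, s = d // 2, s + 1
print(d, s)          # 35 4, since 560 == 35 * 2**4
print(pow(2, d, n))  # 263: neither 1 nor n - 1, so the squaring checks go on to expose 561 as composite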
| 605
|
'''simple docstring'''
import functools
import logging
import os
import sys
import threading
from logging import (
CRITICAL, # NOQA
DEBUG, # NOQA
ERROR, # NOQA
FATAL, # NOQA
INFO, # NOQA
NOTSET, # NOQA
WARN, # NOQA
WARNING, # NOQA
)
from typing import Optional
import huggingface_hub.utils as hf_hub_utils
from tqdm import auto as tqdm_lib
__lowercase = threading.Lock()
__lowercase = None
__lowercase = {
'''debug''': logging.DEBUG,
'''info''': logging.INFO,
'''warning''': logging.WARNING,
'''error''': logging.ERROR,
'''critical''': logging.CRITICAL,
}
__lowercase = logging.WARNING
__lowercase = True
def snake_case__ ( ) -> int:
'''simple docstring'''
lowerCAmelCase = os.getenv("""TRANSFORMERS_VERBOSITY""" , _A )
if env_level_str:
if env_level_str in log_levels:
return log_levels[env_level_str]
else:
logging.getLogger().warning(
f"Unknown option TRANSFORMERS_VERBOSITY={env_level_str}, "
f"has to be one of: { ', '.join(log_levels.keys() ) }" )
return _default_log_level
def snake_case__ ( ) -> str:
'''simple docstring'''
return __name__.split(""".""" )[0]
def snake_case__ ( ) -> logging.Logger:
'''simple docstring'''
return logging.getLogger(_get_library_name() )
def snake_case__ ( ) -> None:
'''simple docstring'''
global _default_handler
with _lock:
if _default_handler:
# This library has already configured the library root logger.
return
lowerCAmelCase = logging.StreamHandler() # Set sys.stderr as stream.
lowerCAmelCase = sys.stderr.flush
# Apply our default configuration to the library root logger.
lowerCAmelCase = _get_library_root_logger()
library_root_logger.addHandler(_default_handler )
library_root_logger.setLevel(_get_default_logging_level() )
lowerCAmelCase = False
def snake_case__ ( ) -> None:
'''simple docstring'''
global _default_handler
with _lock:
if not _default_handler:
return
lowerCAmelCase = _get_library_root_logger()
library_root_logger.removeHandler(_default_handler )
library_root_logger.setLevel(logging.NOTSET )
lowerCAmelCase = None
def snake_case__ ( ) -> Dict:
'''simple docstring'''
return log_levels
def snake_case__ ( _A: Optional[str] = None ) -> logging.Logger:
'''simple docstring'''
if name is None:
lowerCAmelCase = _get_library_name()
_configure_library_root_logger()
return logging.getLogger(_A )
def snake_case__ ( ) -> int:
'''simple docstring'''
_configure_library_root_logger()
return _get_library_root_logger().getEffectiveLevel()
def snake_case__ ( _A: int ) -> None:
'''simple docstring'''
_configure_library_root_logger()
_get_library_root_logger().setLevel(_A )
def snake_case__ ( ) -> int:
'''simple docstring'''
return set_verbosity(_A )
def snake_case__ ( ) -> List[str]:
'''simple docstring'''
return set_verbosity(_A )
def snake_case__ ( ) -> Optional[int]:
'''simple docstring'''
return set_verbosity(_A )
def snake_case__ ( ) -> List[str]:
'''simple docstring'''
return set_verbosity(_A )
def snake_case__ ( ) -> None:
'''simple docstring'''
_configure_library_root_logger()
assert _default_handler is not None
_get_library_root_logger().removeHandler(_default_handler )
def snake_case__ ( ) -> None:
'''simple docstring'''
_configure_library_root_logger()
assert _default_handler is not None
_get_library_root_logger().addHandler(_default_handler )
def snake_case__ ( _A: logging.Handler ) -> None:
'''simple docstring'''
_configure_library_root_logger()
assert handler is not None
_get_library_root_logger().addHandler(_A )
def snake_case__ ( _A: logging.Handler ) -> None:
'''simple docstring'''
_configure_library_root_logger()
assert handler is not None and handler not in _get_library_root_logger().handlers
_get_library_root_logger().removeHandler(_A )
def snake_case__ ( ) -> None:
'''simple docstring'''
_configure_library_root_logger()
lowerCAmelCase = False
def snake_case__ ( ) -> None:
'''simple docstring'''
_configure_library_root_logger()
lowerCAmelCase = True
def snake_case__ ( ) -> None:
'''simple docstring'''
lowerCAmelCase = _get_library_root_logger().handlers
for handler in handlers:
lowerCAmelCase = logging.Formatter("""[%(levelname)s|%(filename)s:%(lineno)s] %(asctime)s >> %(message)s""" )
handler.setFormatter(_A )
def snake_case__ ( ) -> None:
'''simple docstring'''
lowerCAmelCase = _get_library_root_logger().handlers
for handler in handlers:
handler.setFormatter(_A )
def snake_case__ ( self: str , *_A: Optional[int] , **_A: Dict ) -> str:
'''simple docstring'''
lowerCAmelCase = os.getenv("""TRANSFORMERS_NO_ADVISORY_WARNINGS""" , _A )
if no_advisory_warnings:
return
self.warning(*_A , **_A )
__lowercase = warning_advice
@functools.lru_cache(_A )
def snake_case__ ( self: List[str] , *_A: List[Any] , **_A: str ) -> List[str]:
'''simple docstring'''
self.warning(*_A , **_A )
__lowercase = warning_once
class a__:
'''simple docstring'''
def __init__( self , *__lowerCAmelCase , **__lowerCAmelCase): # pylint: disable=unused-argument
"""simple docstring"""
lowerCAmelCase = args[0] if args else None
def __iter__( self):
"""simple docstring"""
return iter(self._iterator)
def __getattr__( self , __lowerCAmelCase):
"""simple docstring"""
def empty_fn(*__lowerCAmelCase , **__lowerCAmelCase): # pylint: disable=unused-argument
return
return empty_fn
def __enter__( self):
"""simple docstring"""
return self
def __exit__( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase):
"""simple docstring"""
return
class a__:
'''simple docstring'''
def __call__( self , *__lowerCAmelCase , **__lowerCAmelCase):
"""simple docstring"""
if _tqdm_active:
return tqdm_lib.tqdm(*__lowerCAmelCase , **__lowerCAmelCase)
else:
return EmptyTqdm(*__lowerCAmelCase , **__lowerCAmelCase)
def a_ ( self , *__lowerCAmelCase , **__lowerCAmelCase):
"""simple docstring"""
lowerCAmelCase = None
if _tqdm_active:
return tqdm_lib.tqdm.set_lock(*__lowerCAmelCase , **__lowerCAmelCase)
def a_ ( self):
"""simple docstring"""
if _tqdm_active:
return tqdm_lib.tqdm.get_lock()
__lowercase = _tqdm_cls()
def snake_case__ ( ) -> bool:
'''simple docstring'''
global _tqdm_active
return bool(_tqdm_active )
def snake_case__ ( ) -> Dict:
'''simple docstring'''
global _tqdm_active
lowerCAmelCase = True
hf_hub_utils.enable_progress_bars()
def snake_case__ ( ) -> Any:
'''simple docstring'''
global _tqdm_active
lowerCAmelCase = False
hf_hub_utils.disable_progress_bars()
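A hedged sketch of the public entry points these helpers back, assuming the usual transformers.utils.logging surface.

from transformers.utils import logging

logging.set_verbosity_info()                 # maps to logging.INFO via the table above
logger = logging.get_logger("transformers")
logger.info("verbosity is now %s", logging.get_verbosity())
logging.disable_progress_bars()              # flips the module-level tqdm flag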
| 605
| 1
|
import webbrowser
from sys import argv
from urllib.parse import parse_qs, quote
import requests
from bsa import BeautifulSoup
from fake_useragent import UserAgent
if __name__ == "__main__":
UpperCamelCase_ = "%20".join(argv[1:]) if len(argv) > 1 else quote(str(input("Search: ")))
print("Googling.....")
UpperCamelCase_ = F'''https://www.google.com/search?q={query}&num=100'''
UpperCamelCase_ = requests.get(
url,
headers={"User-Agent": str(UserAgent().random)},
)
try:
UpperCamelCase_ = (
BeautifulSoup(res.text, "html.parser")
.find("div", attrs={"class": "yuRUbf"})
.find("a")
.get("href")
)
except AttributeError:
UpperCamelCase_ = parse_qs(
BeautifulSoup(res.text, "html.parser")
.find("div", attrs={"class": "kCrYT"})
.find("a")
.get("href")
)["url"][0]
webbrowser.open(link)
| 256
|
import torch
from diffusers import CMStochasticIterativeScheduler
from .test_schedulers import SchedulerCommonTest
class _SCREAMING_SNAKE_CASE ( snake_case ):
lowerCamelCase_ = (CMStochasticIterativeScheduler,)
lowerCamelCase_ = 1_0
def _UpperCAmelCase ( self : Any , **snake_case_ : Tuple ):
"""simple docstring"""
A : str = {
'''num_train_timesteps''': 201,
'''sigma_min''': 0.0_02,
'''sigma_max''': 80.0,
}
config.update(**snake_case_ )
return config
def _UpperCAmelCase ( self : Union[str, Any] ):
"""simple docstring"""
A : List[str] = 10
A : Dict = self.get_scheduler_config()
A : Optional[int] = self.scheduler_classes[0](**snake_case_ )
scheduler.set_timesteps(snake_case_ )
A : List[str] = scheduler.timesteps[0]
A : Any = scheduler.timesteps[1]
A : int = self.dummy_sample
A : str = 0.1 * sample
A : Tuple = scheduler.step(snake_case_ , snake_case_ , snake_case_ ).prev_sample
A : Tuple = scheduler.step(snake_case_ , snake_case_ , snake_case_ ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
def _UpperCAmelCase ( self : Dict ):
"""simple docstring"""
for timesteps in [10, 50, 100, 1000]:
self.check_over_configs(num_train_timesteps=snake_case_ )
def _UpperCAmelCase ( self : Dict ):
"""simple docstring"""
for clip_denoised in [True, False]:
self.check_over_configs(clip_denoised=snake_case_ )
def _UpperCAmelCase ( self : Optional[Any] ):
"""simple docstring"""
A : List[Any] = self.scheduler_classes[0]
A : Union[str, Any] = self.get_scheduler_config()
A : List[str] = scheduler_class(**snake_case_ )
A : str = 1
scheduler.set_timesteps(snake_case_ )
A : Optional[int] = scheduler.timesteps
A : int = torch.manual_seed(0 )
A : Optional[Any] = self.dummy_model()
A : int = self.dummy_sample_deter * scheduler.init_noise_sigma
for i, t in enumerate(snake_case_ ):
# 1. scale model input
A : Dict = scheduler.scale_model_input(snake_case_ , snake_case_ )
# 2. predict noise residual
A : List[Any] = model(snake_case_ , snake_case_ )
# 3. predict previous sample x_t-1
A : Union[str, Any] = scheduler.step(snake_case_ , snake_case_ , snake_case_ , generator=snake_case_ ).prev_sample
A : Union[str, Any] = pred_prev_sample
A : List[str] = torch.sum(torch.abs(snake_case_ ) )
A : int = torch.mean(torch.abs(snake_case_ ) )
assert abs(result_sum.item() - 1_92.76_14 ) < 1E-2
assert abs(result_mean.item() - 0.25_10 ) < 1E-3
def _UpperCAmelCase ( self : Union[str, Any] ):
"""simple docstring"""
A : Tuple = self.scheduler_classes[0]
A : Tuple = self.get_scheduler_config()
A : str = scheduler_class(**snake_case_ )
A : Optional[int] = [106, 0]
scheduler.set_timesteps(timesteps=snake_case_ )
A : Optional[int] = scheduler.timesteps
A : Any = torch.manual_seed(0 )
A : Tuple = self.dummy_model()
A : str = self.dummy_sample_deter * scheduler.init_noise_sigma
for t in timesteps:
# 1. scale model input
A : Tuple = scheduler.scale_model_input(snake_case_ , snake_case_ )
# 2. predict noise residual
A : str = model(snake_case_ , snake_case_ )
# 3. predict previous sample x_t-1
A : Any = scheduler.step(snake_case_ , snake_case_ , snake_case_ , generator=snake_case_ ).prev_sample
A : str = pred_prev_sample
A : str = torch.sum(torch.abs(snake_case_ ) )
A : Union[str, Any] = torch.mean(torch.abs(snake_case_ ) )
assert abs(result_sum.item() - 3_47.63_57 ) < 1E-2
assert abs(result_mean.item() - 0.45_27 ) < 1E-3
def _UpperCAmelCase ( self : int ):
"""simple docstring"""
A : Optional[int] = self.scheduler_classes[0]
A : Optional[int] = self.get_scheduler_config()
A : Any = scheduler_class(**snake_case_ )
A : Union[str, Any] = [39, 30, 12, 15, 0]
with self.assertRaises(snake_case_ , msg='''`timesteps` must be in descending order.''' ):
scheduler.set_timesteps(timesteps=snake_case_ )
def _UpperCAmelCase ( self : Optional[int] ):
"""simple docstring"""
A : List[str] = self.scheduler_classes[0]
A : Dict = self.get_scheduler_config()
A : Tuple = scheduler_class(**snake_case_ )
A : Any = [39, 30, 12, 1, 0]
A : List[Any] = len(snake_case_ )
with self.assertRaises(snake_case_ , msg='''Can only pass one of `num_inference_steps` or `timesteps`.''' ):
scheduler.set_timesteps(num_inference_steps=snake_case_ , timesteps=snake_case_ )
def _UpperCAmelCase ( self : Dict ):
"""simple docstring"""
A : List[Any] = self.scheduler_classes[0]
A : str = self.get_scheduler_config()
A : List[Any] = scheduler_class(**snake_case_ )
A : Dict = [scheduler.config.num_train_timesteps]
with self.assertRaises(
            snake_case_ , msg=F'''`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}''' , ):
scheduler.set_timesteps(timesteps=snake_case_ )
| 256
| 1
|
from . import __version__
# Backward compatibility imports, to make sure all those objects can be found in file_utils
from .utils import (
CLOUDFRONT_DISTRIB_PREFIX,
CONFIG_NAME,
DISABLE_TELEMETRY,
DUMMY_INPUTS,
DUMMY_MASK,
ENV_VARS_TRUE_AND_AUTO_VALUES,
ENV_VARS_TRUE_VALUES,
FEATURE_EXTRACTOR_NAME,
FLAX_WEIGHTS_NAME,
HF_MODULES_CACHE,
HUGGINGFACE_CO_PREFIX,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
MODEL_CARD_NAME,
MULTIPLE_CHOICE_DUMMY_INPUTS,
PYTORCH_PRETRAINED_BERT_CACHE,
PYTORCH_TRANSFORMERS_CACHE,
S3_BUCKET_PREFIX,
SENTENCEPIECE_UNDERLINE,
SPIECE_UNDERLINE,
TF2_WEIGHTS_NAME,
TF_WEIGHTS_NAME,
TORCH_FX_REQUIRED_VERSION,
TRANSFORMERS_CACHE,
TRANSFORMERS_DYNAMIC_MODULE_NAME,
USE_JAX,
USE_TF,
USE_TORCH,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
ContextManagers,
DummyObject,
EntryNotFoundError,
ExplicitEnum,
ModelOutput,
PaddingStrategy,
PushToHubMixin,
RepositoryNotFoundError,
RevisionNotFoundError,
TensorType,
_LazyModule,
add_code_sample_docstrings,
add_end_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
cached_property,
copy_func,
default_cache_path,
define_sagemaker_information,
get_cached_models,
get_file_from_repo,
get_full_repo_name,
get_torch_version,
has_file,
http_user_agent,
is_apex_available,
is_bsa_available,
is_coloredlogs_available,
is_datasets_available,
is_detectrona_available,
is_faiss_available,
is_flax_available,
is_ftfy_available,
is_in_notebook,
is_ipex_available,
is_librosa_available,
is_offline_mode,
is_onnx_available,
is_pandas_available,
is_phonemizer_available,
is_protobuf_available,
is_psutil_available,
is_pyanvml_available,
is_pyctcdecode_available,
is_pytesseract_available,
is_pytorch_quantization_available,
is_rjieba_available,
is_sagemaker_dp_enabled,
is_sagemaker_mp_enabled,
is_scipy_available,
is_sentencepiece_available,
is_seqio_available,
is_sklearn_available,
is_soundfile_availble,
is_spacy_available,
is_speech_available,
is_tensor,
is_tensorflow_probability_available,
is_tfaonnx_available,
is_tf_available,
is_timm_available,
is_tokenizers_available,
is_torch_available,
is_torch_bfaa_available,
is_torch_cuda_available,
is_torch_fx_available,
is_torch_fx_proxy,
is_torch_mps_available,
is_torch_tfaa_available,
is_torch_tpu_available,
is_torchaudio_available,
is_training_run_on_sagemaker,
is_vision_available,
replace_return_docstrings,
requires_backends,
to_numpy,
to_py_obj,
torch_only_method,
)
| 534
|
import gc
import unittest
from transformers import MODEL_FOR_MASKED_LM_MAPPING, TF_MODEL_FOR_MASKED_LM_MAPPING, FillMaskPipeline, pipeline
from transformers.pipelines import PipelineException
from transformers.testing_utils import (
is_pipeline_test,
is_torch_available,
nested_simplify,
require_tf,
require_torch,
require_torch_gpu,
slow,
)
from .test_pipelines_common import ANY
@is_pipeline_test
class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ):
__magic_name__: Union[str, Any] = MODEL_FOR_MASKED_LM_MAPPING
__magic_name__: Optional[int] = TF_MODEL_FOR_MASKED_LM_MAPPING
def UpperCAmelCase_ ( self : str ) -> str:
"""simple docstring"""
super().tearDown()
# clean-up as much as possible GPU memory occupied by PyTorch
gc.collect()
if is_torch_available():
import torch
torch.cuda.empty_cache()
@require_tf
def UpperCAmelCase_ ( self : Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
snake_case_ : int = pipeline(task='fill-mask' , model='sshleifer/tiny-distilroberta-base' , top_k=2 , framework='tf' )
snake_case_ : Optional[Any] = unmasker('My name is <mask>' )
self.assertEqual(
nested_simplify(_A , decimals=6 ) , [
{'sequence': 'My name is grouped', 'score': 2.1E-05, 'token': 38015, 'token_str': ' grouped'},
{'sequence': 'My name is accuser', 'score': 2.1E-05, 'token': 25506, 'token_str': ' accuser'},
] , )
snake_case_ : int = unmasker('The largest city in France is <mask>' )
self.assertEqual(
nested_simplify(_A , decimals=6 ) , [
{
'sequence': 'The largest city in France is grouped',
'score': 2.1E-05,
'token': 38015,
'token_str': ' grouped',
},
{
'sequence': 'The largest city in France is accuser',
'score': 2.1E-05,
'token': 25506,
'token_str': ' accuser',
},
] , )
snake_case_ : Any = unmasker('My name is <mask>' , targets=[' Patrick', ' Clara', ' Teven'] , top_k=3 )
self.assertEqual(
nested_simplify(_A , decimals=6 ) , [
{'sequence': 'My name is Clara', 'score': 2E-05, 'token': 13606, 'token_str': ' Clara'},
{'sequence': 'My name is Patrick', 'score': 2E-05, 'token': 3499, 'token_str': ' Patrick'},
{'sequence': 'My name is Te', 'score': 1.9E-05, 'token': 2941, 'token_str': ' Te'},
] , )
@require_torch
def UpperCAmelCase_ ( self : Union[str, Any] ) -> Optional[Any]:
"""simple docstring"""
snake_case_ : Tuple = pipeline(task='fill-mask' , model='sshleifer/tiny-distilroberta-base' , top_k=2 , framework='pt' )
snake_case_ : Tuple = unmasker('My name is <mask>' )
self.assertEqual(
nested_simplify(_A , decimals=6 ) , [
{'sequence': 'My name is Maul', 'score': 2.2E-05, 'token': 35676, 'token_str': ' Maul'},
{'sequence': 'My name isELS', 'score': 2.2E-05, 'token': 16416, 'token_str': 'ELS'},
] , )
snake_case_ : int = unmasker('The largest city in France is <mask>' )
self.assertEqual(
nested_simplify(_A , decimals=6 ) , [
{
'sequence': 'The largest city in France is Maul',
'score': 2.2E-05,
'token': 35676,
'token_str': ' Maul',
},
{'sequence': 'The largest city in France isELS', 'score': 2.2E-05, 'token': 16416, 'token_str': 'ELS'},
] , )
snake_case_ : Any = unmasker('My name is <mask>' , targets=[' Patrick', ' Clara', ' Teven'] , top_k=3 )
self.assertEqual(
nested_simplify(_A , decimals=6 ) , [
{'sequence': 'My name is Patrick', 'score': 2.1E-05, 'token': 3499, 'token_str': ' Patrick'},
{'sequence': 'My name is Te', 'score': 2E-05, 'token': 2941, 'token_str': ' Te'},
{'sequence': 'My name is Clara', 'score': 2E-05, 'token': 13606, 'token_str': ' Clara'},
] , )
snake_case_ : List[Any] = unmasker('My name is <mask> <mask>' , top_k=2 )
self.assertEqual(
nested_simplify(_A , decimals=6 ) , [
[
{
'score': 2.2E-05,
'token': 35676,
'token_str': ' Maul',
'sequence': '<s>My name is Maul<mask></s>',
},
{'score': 2.2E-05, 'token': 16416, 'token_str': 'ELS', 'sequence': '<s>My name isELS<mask></s>'},
],
[
{
'score': 2.2E-05,
'token': 35676,
'token_str': ' Maul',
'sequence': '<s>My name is<mask> Maul</s>',
},
{'score': 2.2E-05, 'token': 16416, 'token_str': 'ELS', 'sequence': '<s>My name is<mask>ELS</s>'},
],
] , )
@require_torch_gpu
def UpperCAmelCase_ ( self : str ) -> Any:
"""simple docstring"""
snake_case_ : Union[str, Any] = pipeline('fill-mask' , model='hf-internal-testing/tiny-random-distilbert' , device=0 , framework='pt' )
# convert model to fp16
pipe.model.half()
snake_case_ : Tuple = pipe('Paris is the [MASK] of France.' )
# We actually don't care about the result, we just want to make sure
# it works, meaning the float16 tensor got casted back to float32
# for postprocessing.
self.assertIsInstance(_A , _A )
@slow
@require_torch
def UpperCAmelCase_ ( self : str ) -> List[Any]:
"""simple docstring"""
snake_case_ : Optional[Any] = pipeline(task='fill-mask' , model='distilroberta-base' , top_k=2 , framework='pt' )
self.run_large_test(_A )
@slow
@require_tf
def UpperCAmelCase_ ( self : Optional[int] ) -> int:
"""simple docstring"""
snake_case_ : Dict = pipeline(task='fill-mask' , model='distilroberta-base' , top_k=2 , framework='tf' )
self.run_large_test(_A )
def UpperCAmelCase_ ( self : Dict , _A : List[Any] ) -> Union[str, Any]:
"""simple docstring"""
snake_case_ : List[Any] = unmasker('My name is <mask>' )
self.assertEqual(
nested_simplify(_A ) , [
{'sequence': 'My name is John', 'score': 0.0_0_8, 'token': 610, 'token_str': ' John'},
{'sequence': 'My name is Chris', 'score': 0.0_0_7, 'token': 1573, 'token_str': ' Chris'},
] , )
snake_case_ : List[Any] = unmasker('The largest city in France is <mask>' )
self.assertEqual(
nested_simplify(_A ) , [
{
'sequence': 'The largest city in France is Paris',
'score': 0.2_5_1,
'token': 2201,
'token_str': ' Paris',
},
{
'sequence': 'The largest city in France is Lyon',
'score': 0.2_1_4,
'token': 12790,
'token_str': ' Lyon',
},
] , )
snake_case_ : Tuple = unmasker('My name is <mask>' , targets=[' Patrick', ' Clara', ' Teven'] , top_k=3 )
self.assertEqual(
nested_simplify(_A ) , [
{'sequence': 'My name is Patrick', 'score': 0.0_0_5, 'token': 3499, 'token_str': ' Patrick'},
{'sequence': 'My name is Clara', 'score': 0.0_0_0, 'token': 13606, 'token_str': ' Clara'},
{'sequence': 'My name is Te', 'score': 0.0_0_0, 'token': 2941, 'token_str': ' Te'},
] , )
@require_torch
def UpperCAmelCase_ ( self : Optional[int] ) -> Any:
"""simple docstring"""
snake_case_ : Optional[Any] = pipeline(task='fill-mask' , model='sshleifer/tiny-distilroberta-base' , framework='pt' )
snake_case_ : Tuple = None
snake_case_ : str = None
self.run_pipeline_test(_A , [] )
@require_tf
def UpperCAmelCase_ ( self : Tuple ) -> Dict:
"""simple docstring"""
snake_case_ : List[Any] = pipeline(task='fill-mask' , model='sshleifer/tiny-distilroberta-base' , framework='tf' )
snake_case_ : List[str] = None
snake_case_ : List[str] = None
self.run_pipeline_test(_A , [] )
def UpperCAmelCase_ ( self : List[str] , _A : List[Any] , _A : Tuple , _A : Optional[int] ) -> Optional[Any]:
"""simple docstring"""
if tokenizer is None or tokenizer.mask_token_id is None:
self.skipTest('The provided tokenizer has no mask token, (probably reformer or wav2vec2)' )
snake_case_ : Dict = FillMaskPipeline(model=_A , tokenizer=_A )
snake_case_ : Optional[Any] = [
F"""This is another {tokenizer.mask_token} test""",
]
return fill_masker, examples
def UpperCAmelCase_ ( self : Optional[Any] , _A : str , _A : List[Any] ) -> int:
"""simple docstring"""
snake_case_ : Optional[int] = fill_masker.tokenizer
snake_case_ : List[Any] = fill_masker.model
snake_case_ : int = fill_masker(
F"""This is a {tokenizer.mask_token}""" , )
self.assertEqual(
_A , [
{'sequence': ANY(_A ), 'score': ANY(_A ), 'token': ANY(_A ), 'token_str': ANY(_A )},
{'sequence': ANY(_A ), 'score': ANY(_A ), 'token': ANY(_A ), 'token_str': ANY(_A )},
{'sequence': ANY(_A ), 'score': ANY(_A ), 'token': ANY(_A ), 'token_str': ANY(_A )},
{'sequence': ANY(_A ), 'score': ANY(_A ), 'token': ANY(_A ), 'token_str': ANY(_A )},
{'sequence': ANY(_A ), 'score': ANY(_A ), 'token': ANY(_A ), 'token_str': ANY(_A )},
] , )
snake_case_ : Dict = fill_masker([F"""This is a {tokenizer.mask_token}"""] )
self.assertEqual(
_A , [
{'sequence': ANY(_A ), 'score': ANY(_A ), 'token': ANY(_A ), 'token_str': ANY(_A )},
{'sequence': ANY(_A ), 'score': ANY(_A ), 'token': ANY(_A ), 'token_str': ANY(_A )},
{'sequence': ANY(_A ), 'score': ANY(_A ), 'token': ANY(_A ), 'token_str': ANY(_A )},
{'sequence': ANY(_A ), 'score': ANY(_A ), 'token': ANY(_A ), 'token_str': ANY(_A )},
{'sequence': ANY(_A ), 'score': ANY(_A ), 'token': ANY(_A ), 'token_str': ANY(_A )},
] , )
snake_case_ : Optional[int] = fill_masker([F"""This is a {tokenizer.mask_token}""", F"""Another {tokenizer.mask_token} great test."""] )
self.assertEqual(
_A , [
[
{'sequence': ANY(_A ), 'score': ANY(_A ), 'token': ANY(_A ), 'token_str': ANY(_A )},
{'sequence': ANY(_A ), 'score': ANY(_A ), 'token': ANY(_A ), 'token_str': ANY(_A )},
{'sequence': ANY(_A ), 'score': ANY(_A ), 'token': ANY(_A ), 'token_str': ANY(_A )},
{'sequence': ANY(_A ), 'score': ANY(_A ), 'token': ANY(_A ), 'token_str': ANY(_A )},
{'sequence': ANY(_A ), 'score': ANY(_A ), 'token': ANY(_A ), 'token_str': ANY(_A )},
],
[
{'sequence': ANY(_A ), 'score': ANY(_A ), 'token': ANY(_A ), 'token_str': ANY(_A )},
{'sequence': ANY(_A ), 'score': ANY(_A ), 'token': ANY(_A ), 'token_str': ANY(_A )},
{'sequence': ANY(_A ), 'score': ANY(_A ), 'token': ANY(_A ), 'token_str': ANY(_A )},
{'sequence': ANY(_A ), 'score': ANY(_A ), 'token': ANY(_A ), 'token_str': ANY(_A )},
{'sequence': ANY(_A ), 'score': ANY(_A ), 'token': ANY(_A ), 'token_str': ANY(_A )},
],
] , )
with self.assertRaises(_A ):
fill_masker([None] )
# No mask_token is not supported
with self.assertRaises(_A ):
fill_masker('This is' )
self.run_test_top_k(_A , _A )
self.run_test_targets(_A , _A )
self.run_test_top_k_targets(_A , _A )
self.fill_mask_with_duplicate_targets_and_top_k(_A , _A )
self.fill_mask_with_multiple_masks(_A , _A )
def UpperCAmelCase_ ( self : Optional[Any] , _A : Any , _A : Optional[int] ) -> Any:
"""simple docstring"""
snake_case_ : Dict = tokenizer.get_vocab()
snake_case_ : List[Any] = sorted(vocab.keys() )[:2]
# Pipeline argument
snake_case_ : Dict = FillMaskPipeline(model=_A , tokenizer=_A , targets=_A )
snake_case_ : Optional[int] = fill_masker(F"""This is a {tokenizer.mask_token}""" )
self.assertEqual(
_A , [
{'sequence': ANY(_A ), 'score': ANY(_A ), 'token': ANY(_A ), 'token_str': ANY(_A )},
{'sequence': ANY(_A ), 'score': ANY(_A ), 'token': ANY(_A ), 'token_str': ANY(_A )},
] , )
snake_case_ : List[str] = {vocab[el] for el in targets}
self.assertEqual({el['token'] for el in outputs} , _A )
snake_case_ : int = [tokenizer.decode([x] ) for x in target_ids]
self.assertEqual({el['token_str'] for el in outputs} , set(_A ) )
# Call argument
snake_case_ : Dict = FillMaskPipeline(model=_A , tokenizer=_A )
snake_case_ : Optional[int] = fill_masker(F"""This is a {tokenizer.mask_token}""" , targets=_A )
self.assertEqual(
_A , [
{'sequence': ANY(_A ), 'score': ANY(_A ), 'token': ANY(_A ), 'token_str': ANY(_A )},
{'sequence': ANY(_A ), 'score': ANY(_A ), 'token': ANY(_A ), 'token_str': ANY(_A )},
] , )
snake_case_ : Any = {vocab[el] for el in targets}
self.assertEqual({el['token'] for el in outputs} , _A )
snake_case_ : Tuple = [tokenizer.decode([x] ) for x in target_ids]
self.assertEqual({el['token_str'] for el in outputs} , set(_A ) )
# Score equivalence
snake_case_ : Union[str, Any] = fill_masker(F"""This is a {tokenizer.mask_token}""" , targets=_A )
snake_case_ : Any = [top_mask['token_str'] for top_mask in outputs]
snake_case_ : Optional[Any] = [top_mask['score'] for top_mask in outputs]
# For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`.
if set(_A ) == set(_A ):
snake_case_ : int = fill_masker(F"""This is a {tokenizer.mask_token}""" , targets=_A )
snake_case_ : Union[str, Any] = [top_mask['score'] for top_mask in unmasked_targets]
self.assertEqual(nested_simplify(_A ) , nested_simplify(_A ) )
# Raises with invalid
with self.assertRaises(_A ):
snake_case_ : Dict = fill_masker(F"""This is a {tokenizer.mask_token}""" , targets=[] )
# For some tokenizers, `""` is actually in the vocabulary and the expected error won't raised
if "" not in tokenizer.get_vocab():
with self.assertRaises(_A ):
snake_case_ : Tuple = fill_masker(F"""This is a {tokenizer.mask_token}""" , targets=[''] )
with self.assertRaises(_A ):
snake_case_ : Union[str, Any] = fill_masker(F"""This is a {tokenizer.mask_token}""" , targets='' )
def UpperCAmelCase_ ( self : Tuple , _A : Any , _A : Optional[Any] ) -> Any:
"""simple docstring"""
snake_case_ : str = FillMaskPipeline(model=_A , tokenizer=_A , top_k=2 )
snake_case_ : List[str] = fill_masker(F"""This is a {tokenizer.mask_token}""" )
self.assertEqual(
_A , [
{'sequence': ANY(_A ), 'score': ANY(_A ), 'token': ANY(_A ), 'token_str': ANY(_A )},
{'sequence': ANY(_A ), 'score': ANY(_A ), 'token': ANY(_A ), 'token_str': ANY(_A )},
] , )
snake_case_ : Any = FillMaskPipeline(model=_A , tokenizer=_A )
snake_case_ : int = fill_masker(F"""This is a {tokenizer.mask_token}""" , top_k=2 )
self.assertEqual(
_A , [
{'sequence': ANY(_A ), 'score': ANY(_A ), 'token': ANY(_A ), 'token_str': ANY(_A )},
{'sequence': ANY(_A ), 'score': ANY(_A ), 'token': ANY(_A ), 'token_str': ANY(_A )},
] , )
self.assertEqual(nested_simplify(_A ) , nested_simplify(_A ) )
def UpperCAmelCase_ ( self : Tuple , _A : Any , _A : Dict ) -> str:
"""simple docstring"""
snake_case_ : str = tokenizer.get_vocab()
snake_case_ : Tuple = FillMaskPipeline(model=_A , tokenizer=_A )
# top_k=2, ntargets=3
snake_case_ : str = sorted(vocab.keys() )[:3]
snake_case_ : Union[str, Any] = fill_masker(F"""This is a {tokenizer.mask_token}""" , top_k=2 , targets=_A )
        # If we use the most probable targets, and filter differently, we should still
        # have the same results
        snake_case_ : Any = [el['token_str'] for el in sorted(_A , key=lambda x : x["score"] , reverse=True )]
# For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`.
if set(_A ).issubset(_A ):
snake_case_ : str = fill_masker(F"""This is a {tokenizer.mask_token}""" , top_k=3 , targets=_A )
# They should yield exactly the same result
self.assertEqual(nested_simplify(_A ) , nested_simplify(_A ) )
def UpperCAmelCase_ ( self : str , _A : Dict , _A : Tuple ) -> Dict:
"""simple docstring"""
snake_case_ : Tuple = FillMaskPipeline(model=_A , tokenizer=_A )
snake_case_ : List[Any] = tokenizer.get_vocab()
# String duplicates + id duplicates
snake_case_ : str = sorted(vocab.keys() )[:3]
snake_case_ : Tuple = [targets[0], targets[1], targets[0], targets[2], targets[1]]
snake_case_ : str = fill_masker(F"""My name is {tokenizer.mask_token}""" , targets=_A , top_k=10 )
# The target list contains duplicates, so we can't output more
# than them
self.assertEqual(len(_A ) , 3 )
def UpperCAmelCase_ ( self : List[str] , _A : str , _A : Union[str, Any] ) -> Optional[Any]:
"""simple docstring"""
snake_case_ : Union[str, Any] = FillMaskPipeline(model=_A , tokenizer=_A )
snake_case_ : List[str] = fill_masker(
F"""This is a {tokenizer.mask_token} {tokenizer.mask_token} {tokenizer.mask_token}""" , top_k=2 )
self.assertEqual(
_A , [
[
{'sequence': ANY(_A ), 'score': ANY(_A ), 'token': ANY(_A ), 'token_str': ANY(_A )},
{'sequence': ANY(_A ), 'score': ANY(_A ), 'token': ANY(_A ), 'token_str': ANY(_A )},
],
[
{'sequence': ANY(_A ), 'score': ANY(_A ), 'token': ANY(_A ), 'token_str': ANY(_A )},
{'sequence': ANY(_A ), 'score': ANY(_A ), 'token': ANY(_A ), 'token_str': ANY(_A )},
],
[
{'sequence': ANY(_A ), 'score': ANY(_A ), 'token': ANY(_A ), 'token_str': ANY(_A )},
{'sequence': ANY(_A ), 'score': ANY(_A ), 'token': ANY(_A ), 'token_str': ANY(_A )},
],
] , )
| 534
| 1
|
import argparse
import numpy as np
import torch
from transformers import SpeechTaHifiGan, SpeechTaHifiGanConfig, logging
logging.set_verbosity_info()
__lowercase : Tuple = logging.get_logger('''transformers.models.speecht5''')
def lowercase ( __A : Dict , __A : Union[str, Any] , __A : int ) -> Any:
'''simple docstring'''
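    # Weight norm is applied first so the checkpoint's weight_g / weight_v tensors can be
    # copied into the input conv, every upsample and residual conv, and the output conv;
    # it is removed again once all weights are in place.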
hf_model.apply_weight_norm()
snake_case : Any = checkpoint["""input_conv.weight_g"""]
snake_case : Any = checkpoint["""input_conv.weight_v"""]
snake_case : Optional[Any] = checkpoint["""input_conv.bias"""]
for i in range(len(config.upsample_rates ) ):
snake_case : Optional[Any] = checkpoint[f"""upsamples.{i}.1.weight_g"""]
snake_case : List[str] = checkpoint[f"""upsamples.{i}.1.weight_v"""]
snake_case : Any = checkpoint[f"""upsamples.{i}.1.bias"""]
for i in range(len(config.upsample_rates ) * len(config.resblock_kernel_sizes ) ):
for j in range(len(config.resblock_dilation_sizes ) ):
snake_case : Any = checkpoint[f"""blocks.{i}.convs1.{j}.1.weight_g"""]
snake_case : Union[str, Any] = checkpoint[f"""blocks.{i}.convs1.{j}.1.weight_v"""]
snake_case : str = checkpoint[f"""blocks.{i}.convs1.{j}.1.bias"""]
snake_case : Any = checkpoint[f"""blocks.{i}.convs2.{j}.1.weight_g"""]
snake_case : List[Any] = checkpoint[f"""blocks.{i}.convs2.{j}.1.weight_v"""]
snake_case : str = checkpoint[f"""blocks.{i}.convs2.{j}.1.bias"""]
snake_case : Union[str, Any] = checkpoint["""output_conv.1.weight_g"""]
snake_case : Tuple = checkpoint["""output_conv.1.weight_v"""]
snake_case : Dict = checkpoint["""output_conv.1.bias"""]
hf_model.remove_weight_norm()
@torch.no_grad()
def lowercase ( __A : List[Any] , __A : Optional[Any] , __A : List[str] , __A : Optional[Any]=None , __A : List[str]=None , ) -> str:
'''simple docstring'''
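    # Build (or load) the config, instantiate the HF vocoder, copy the generator weights
    # from the original checkpoint, and load the mel-spectrogram statistics (mean and
    # scale) before saving and optionally pushing to the Hub.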
if config_path is not None:
snake_case : str = SpeechTaHifiGanConfig.from_pretrained(__A )
else:
snake_case : Tuple = SpeechTaHifiGanConfig()
snake_case : Optional[Any] = SpeechTaHifiGan(__A )
snake_case : str = torch.load(__A )
load_weights(orig_checkpoint["""model"""]["""generator"""] , __A , __A )
snake_case : List[Any] = np.load(__A )
snake_case : Tuple = stats[0].reshape(-1 )
snake_case : str = stats[1].reshape(-1 )
snake_case : Optional[int] = torch.from_numpy(__A ).float()
snake_case : Tuple = torch.from_numpy(__A ).float()
model.save_pretrained(__A )
if repo_id:
print("""Pushing to the hub...""" )
model.push_to_hub(__A )
if __name__ == "__main__":
__lowercase : str = argparse.ArgumentParser()
parser.add_argument('''--checkpoint_path''', required=True, default=None, type=str, help='''Path to original checkpoint''')
parser.add_argument('''--stats_path''', required=True, default=None, type=str, help='''Path to stats.npy file''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
parser.add_argument(
'''--pytorch_dump_folder_path''', required=True, default=None, type=str, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--push_to_hub''', default=None, type=str, help='''Where to upload the converted model on the 🤗 hub.'''
)
__lowercase : List[str] = parser.parse_args()
convert_hifigan_checkpoint(
args.checkpoint_path,
args.stats_path,
args.pytorch_dump_folder_path,
args.config_path,
args.push_to_hub,
)
| 36
|
import argparse
import requests
import torch
# pip3 install salesforce-lavis
# I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis
from lavis.models import load_model_and_preprocess
from PIL import Image
from transformers import (
AutoTokenizer,
BlipaConfig,
BlipaForConditionalGeneration,
BlipaProcessor,
BlipaVisionConfig,
BlipImageProcessor,
OPTConfig,
TaConfig,
)
from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD
def _A ( ) -> int:
"""simple docstring"""
__UpperCamelCase = 'https://storage.googleapis.com/sfr-vision-language-research/LAVIS/assets/merlion.png'
__UpperCamelCase = Image.open(requests.get(_lowercase , stream=_lowercase ).raw ).convert('RGB' )
return image
def _A ( _lowercase ) -> int:
"""simple docstring"""
__UpperCamelCase = []
# fmt: off
# vision encoder
rename_keys.append(('visual_encoder.cls_token', 'vision_model.embeddings.class_embedding') )
rename_keys.append(('visual_encoder.pos_embed', 'vision_model.embeddings.position_embedding') )
rename_keys.append(('visual_encoder.patch_embed.proj.weight', 'vision_model.embeddings.patch_embedding.weight') )
rename_keys.append(('visual_encoder.patch_embed.proj.bias', 'vision_model.embeddings.patch_embedding.bias') )
rename_keys.append(('ln_vision.weight', 'vision_model.post_layernorm.weight') )
rename_keys.append(('ln_vision.bias', 'vision_model.post_layernorm.bias') )
for i in range(config.vision_config.num_hidden_layers ):
rename_keys.append((f'''visual_encoder.blocks.{i}.norm1.weight''', f'''vision_model.encoder.layers.{i}.layer_norm1.weight''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.norm1.bias''', f'''vision_model.encoder.layers.{i}.layer_norm1.bias''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.norm2.weight''', f'''vision_model.encoder.layers.{i}.layer_norm2.weight''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.norm2.bias''', f'''vision_model.encoder.layers.{i}.layer_norm2.bias''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.attn.qkv.weight''', f'''vision_model.encoder.layers.{i}.self_attn.qkv.weight''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.attn.proj.weight''', f'''vision_model.encoder.layers.{i}.self_attn.projection.weight''',) )
rename_keys.append((f'''visual_encoder.blocks.{i}.attn.proj.bias''', f'''vision_model.encoder.layers.{i}.self_attn.projection.bias''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.mlp.fc1.weight''', f'''vision_model.encoder.layers.{i}.mlp.fc1.weight''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.mlp.fc1.bias''', f'''vision_model.encoder.layers.{i}.mlp.fc1.bias''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.mlp.fc2.weight''', f'''vision_model.encoder.layers.{i}.mlp.fc2.weight''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.mlp.fc2.bias''', f'''vision_model.encoder.layers.{i}.mlp.fc2.bias''') )
# QFormer
rename_keys.append(('Qformer.bert.embeddings.LayerNorm.weight', 'qformer.layernorm.weight') )
rename_keys.append(('Qformer.bert.embeddings.LayerNorm.bias', 'qformer.layernorm.bias') )
# fmt: on
return rename_keys
def _A ( _lowercase , _lowercase , _lowercase ) -> Optional[int]:
"""simple docstring"""
__UpperCamelCase = dct.pop(_lowercase )
__UpperCamelCase = val
def _A ( _lowercase , _lowercase ) -> int:
"""simple docstring"""
for i in range(config.vision_config.num_hidden_layers ):
# read in original q and v biases
__UpperCamelCase = state_dict.pop(f'''visual_encoder.blocks.{i}.attn.q_bias''' )
__UpperCamelCase = state_dict.pop(f'''visual_encoder.blocks.{i}.attn.v_bias''' )
# next, set bias in the state dict
__UpperCamelCase = torch.cat((q_bias, torch.zeros_like(_lowercase , requires_grad=_lowercase ), v_bias) )
__UpperCamelCase = qkv_bias
def _A ( _lowercase , _lowercase ) -> Any:
"""simple docstring"""
__UpperCamelCase = 3_64 if 'coco' in model_name else 2_24
__UpperCamelCase = BlipaVisionConfig(image_size=_lowercase ).to_dict()
# make sure the models have proper bos_token_id and eos_token_id set (important for generation)
# seems like flan-T5 models don't have bos_token_id properly set?
if "opt-2.7b" in model_name:
__UpperCamelCase = OPTConfig.from_pretrained('facebook/opt-2.7b' , eos_token_id=_lowercase ).to_dict()
elif "opt-6.7b" in model_name:
__UpperCamelCase = OPTConfig.from_pretrained('facebook/opt-6.7b' , eos_token_id=_lowercase ).to_dict()
elif "t5-xl" in model_name:
__UpperCamelCase = TaConfig.from_pretrained('google/flan-t5-xl' , dense_act_fn='gelu' , bos_token_id=1 ).to_dict()
elif "t5-xxl" in model_name:
__UpperCamelCase = TaConfig.from_pretrained('google/flan-t5-xxl' , dense_act_fn='gelu' , bos_token_id=1 ).to_dict()
__UpperCamelCase = BlipaConfig(vision_config=_lowercase , text_config=_lowercase )
return config, image_size
@torch.no_grad()
def _A ( _lowercase , _lowercase=None , _lowercase=False ) -> Union[str, Any]:
"""simple docstring"""
__UpperCamelCase = (
AutoTokenizer.from_pretrained('facebook/opt-2.7b' )
if 'opt' in model_name
else AutoTokenizer.from_pretrained('google/flan-t5-xl' )
)
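    # the id of the newline token is looked up so it can serve as the eos_token_id of the
    # language model config, making generation stop at a newline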
__UpperCamelCase = tokenizer('\n' , add_special_tokens=_lowercase ).input_ids[0]
__UpperCamelCase, __UpperCamelCase = get_blipa_config(_lowercase , eos_token_id=_lowercase )
__UpperCamelCase = BlipaForConditionalGeneration(_lowercase ).eval()
__UpperCamelCase = {
'blip2-opt-2.7b': ('blip2_opt', 'pretrain_opt2.7b'),
'blip2-opt-6.7b': ('blip2_opt', 'pretrain_opt6.7b'),
'blip2-opt-2.7b-coco': ('blip2_opt', 'caption_coco_opt2.7b'),
'blip2-opt-6.7b-coco': ('blip2_opt', 'caption_coco_opt6.7b'),
'blip2-flan-t5-xl': ('blip2_t5', 'pretrain_flant5xl'),
'blip2-flan-t5-xl-coco': ('blip2_t5', 'caption_coco_flant5xl'),
'blip2-flan-t5-xxl': ('blip2_t5', 'pretrain_flant5xxl'),
}
__UpperCamelCase, __UpperCamelCase = model_name_to_original[model_name]
# load original model
print('Loading original model...' )
__UpperCamelCase = 'cuda' if torch.cuda.is_available() else 'cpu'
__UpperCamelCase, __UpperCamelCase, __UpperCamelCase = load_model_and_preprocess(
name=_lowercase , model_type=_lowercase , is_eval=_lowercase , device=_lowercase )
original_model.eval()
print('Done!' )
# update state dict keys
__UpperCamelCase = original_model.state_dict()
__UpperCamelCase = create_rename_keys(_lowercase )
for src, dest in rename_keys:
rename_key(_lowercase , _lowercase , _lowercase )
# some keys can be renamed efficiently
for key, val in state_dict.copy().items():
__UpperCamelCase = state_dict.pop(_lowercase )
if key.startswith('Qformer.bert' ):
__UpperCamelCase = key.replace('Qformer.bert' , 'qformer' )
if "attention.self" in key:
__UpperCamelCase = key.replace('self' , 'attention' )
if "opt_proj" in key:
__UpperCamelCase = key.replace('opt_proj' , 'language_projection' )
if "t5_proj" in key:
__UpperCamelCase = key.replace('t5_proj' , 'language_projection' )
if key.startswith('opt' ):
__UpperCamelCase = key.replace('opt' , 'language' )
if key.startswith('t5' ):
__UpperCamelCase = key.replace('t5' , 'language' )
__UpperCamelCase = val
# read in qv biases
read_in_q_v_bias(_lowercase , _lowercase )
__UpperCamelCase, __UpperCamelCase = hf_model.load_state_dict(_lowercase , strict=_lowercase )
assert len(_lowercase ) == 0
assert unexpected_keys == ["qformer.embeddings.position_ids"]
__UpperCamelCase = load_demo_image()
__UpperCamelCase = vis_processors['eval'](_lowercase ).unsqueeze(0 ).to(_lowercase )
__UpperCamelCase = tokenizer(['\n'] , return_tensors='pt' ).input_ids.to(_lowercase )
# create processor
__UpperCamelCase = BlipImageProcessor(
size={'height': image_size, 'width': image_size} , image_mean=_lowercase , image_std=_lowercase )
__UpperCamelCase = BlipaProcessor(image_processor=_lowercase , tokenizer=_lowercase )
__UpperCamelCase = processor(images=_lowercase , return_tensors='pt' ).pixel_values.to(_lowercase )
# make sure processor creates exact same pixel values
assert torch.allclose(_lowercase , _lowercase )
original_model.to(_lowercase )
hf_model.to(_lowercase )
with torch.no_grad():
if "opt" in model_name:
__UpperCamelCase = original_model({'image': original_pixel_values, 'text_input': ['']} ).logits
__UpperCamelCase = hf_model(_lowercase , _lowercase ).logits
else:
__UpperCamelCase = original_model(
{'image': original_pixel_values, 'text_input': ['\n'], 'text_output': ['\n']} ).logits
__UpperCamelCase = input_ids.masked_fill(input_ids == tokenizer.pad_token_id , -1_00 )
__UpperCamelCase = hf_model(_lowercase , _lowercase , labels=_lowercase ).logits
assert original_logits.shape == logits.shape
print('First values of original logits:' , original_logits[0, :3, :3] )
print('First values of HF logits:' , logits[0, :3, :3] )
# assert values
if model_name == "blip2-flan-t5-xl":
__UpperCamelCase = torch.tensor(
[[-41.58_50, -4.44_40, -8.99_22], [-47.43_22, -5.91_43, -1.73_40]] , device=_lowercase )
assert torch.allclose(logits[0, :3, :3] , _lowercase , atol=1e-4 )
elif model_name == "blip2-flan-t5-xl-coco":
__UpperCamelCase = torch.tensor(
[[-57.01_09, -9.89_67, -12.62_80], [-68.65_78, -12.71_91, -10.50_65]] , device=_lowercase )
else:
# cast to same type
__UpperCamelCase = logits.dtype
assert torch.allclose(original_logits.to(_lowercase ) , _lowercase , atol=1e-2 )
print('Looks ok!' )
print('Generating a caption...' )
__UpperCamelCase = ''
__UpperCamelCase = tokenizer(_lowercase , return_tensors='pt' ).input_ids.to(_lowercase )
__UpperCamelCase = original_model.generate({'image': original_pixel_values} )
__UpperCamelCase = hf_model.generate(
_lowercase , _lowercase , do_sample=_lowercase , num_beams=5 , max_length=30 , min_length=1 , top_p=0.9 , repetition_penalty=1.0 , length_penalty=1.0 , temperature=1 , )
print('Original generation:' , _lowercase )
__UpperCamelCase = input_ids.shape[1]
__UpperCamelCase = processor.batch_decode(outputs[:, prompt_length:] , skip_special_tokens=_lowercase )
__UpperCamelCase = [text.strip() for text in output_text]
print('HF generation:' , _lowercase )
if pytorch_dump_folder_path is not None:
processor.save_pretrained(_lowercase )
hf_model.save_pretrained(_lowercase )
if push_to_hub:
processor.push_to_hub(f'''nielsr/{model_name}''' )
hf_model.push_to_hub(f'''nielsr/{model_name}''' )
if __name__ == "__main__":
__snake_case = argparse.ArgumentParser()
__snake_case = [
'''blip2-opt-2.7b''',
'''blip2-opt-6.7b''',
'''blip2-opt-2.7b-coco''',
'''blip2-opt-6.7b-coco''',
'''blip2-flan-t5-xl''',
'''blip2-flan-t5-xl-coco''',
'''blip2-flan-t5-xxl''',
]
parser.add_argument(
'''--model_name''',
default='''blip2-opt-2.7b''',
choices=choices,
type=str,
help='''Path to hf config.json of model to convert''',
)
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
help='''Whether to push the model and processor to the hub after converting''',
)
__snake_case = parser.parse_args()
convert_blipa_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 1
| 0
|
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
UpperCAmelCase : Any = logging.get_logger(__name__)
UpperCAmelCase : Any = {
"microsoft/swin-tiny-patch4-window7-224": (
"https://huggingface.co/microsoft/swin-tiny-patch4-window7-224/resolve/main/config.json"
),
# See all Swin models at https://huggingface.co/models?filter=swin
}
class SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase , __UpperCAmelCase ):
lowercase__ = "swin"
lowercase__ = {
"num_attention_heads": "num_heads",
"num_hidden_layers": "num_layers",
}
def __init__( self : List[Any] , lowerCAmelCase_ : Union[str, Any]=2_2_4 , lowerCAmelCase_ : Union[str, Any]=4 , lowerCAmelCase_ : str=3 , lowerCAmelCase_ : Optional[int]=9_6 , lowerCAmelCase_ : List[str]=[2, 2, 6, 2] , lowerCAmelCase_ : Dict=[3, 6, 1_2, 2_4] , lowerCAmelCase_ : Dict=7 , lowerCAmelCase_ : Union[str, Any]=4.0 , lowerCAmelCase_ : Optional[int]=True , lowerCAmelCase_ : Dict=0.0 , lowerCAmelCase_ : Dict=0.0 , lowerCAmelCase_ : Dict=0.1 , lowerCAmelCase_ : List[Any]="gelu" , lowerCAmelCase_ : List[Any]=False , lowerCAmelCase_ : List[Any]=0.02 , lowerCAmelCase_ : List[str]=1E-5 , lowerCAmelCase_ : Optional[Any]=3_2 , lowerCAmelCase_ : List[str]=None , lowerCAmelCase_ : Optional[Any]=None , **lowerCAmelCase_ : int , ):
"""simple docstring"""
super().__init__(**lowerCAmelCase_)
lowercase_ = image_size
lowercase_ = patch_size
lowercase_ = num_channels
lowercase_ = embed_dim
lowercase_ = depths
lowercase_ = len(lowerCAmelCase_)
lowercase_ = num_heads
lowercase_ = window_size
lowercase_ = mlp_ratio
lowercase_ = qkv_bias
lowercase_ = hidden_dropout_prob
lowercase_ = attention_probs_dropout_prob
lowercase_ = drop_path_rate
lowercase_ = hidden_act
lowercase_ = use_absolute_embeddings
lowercase_ = layer_norm_eps
lowercase_ = initializer_range
lowercase_ = encoder_stride
# we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
lowercase_ = int(embed_dim * 2 ** (len(lowerCAmelCase_) - 1))
lowercase_ = ["""stem"""] + [F'''stage{idx}''' for idx in range(1 , len(lowerCAmelCase_) + 1)]
lowercase_ , lowercase_ = get_aligned_output_features_output_indices(
out_features=lowerCAmelCase_ , out_indices=lowerCAmelCase_ , stage_names=self.stage_names)
class SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase ):
lowercase__ = version.parse("1.11" )
@property
def _UpperCAmelCase ( self : Dict):
"""simple docstring"""
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
])
@property
def _UpperCAmelCase ( self : Tuple):
"""simple docstring"""
return 1E-4
| 100
|
"""simple docstring"""
import numpy as np
from sklearn.datasets import fetch_california_housing
from sklearn.metrics import mean_absolute_error, mean_squared_error
from sklearn.model_selection import train_test_split
from xgboost import XGBRegressor
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase ) -> tuple:
'''simple docstring'''
return (data["data"], data["target"])
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) -> np.ndarray:
'''simple docstring'''
lowercase_ = XGBRegressor(verbosity=0 , random_state=42 )
xgb.fit(__lowerCAmelCase , __lowerCAmelCase )
# Predict target for test data
lowercase_ = xgb.predict(__lowerCAmelCase )
lowercase_ = predictions.reshape(len(__lowerCAmelCase ) , 1 )
return predictions
def _SCREAMING_SNAKE_CASE () -> None:
'''simple docstring'''
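    # Fetch the California housing data, hold out 25% of it for testing, fit the
    # regressor, and report mean absolute and mean squared error on the test split.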
lowercase_ = fetch_california_housing()
lowercase_ , lowercase_ = data_handling(__lowerCAmelCase )
lowercase_ , lowercase_ , lowercase_ , lowercase_ = train_test_split(
__lowerCAmelCase , __lowerCAmelCase , test_size=0.25 , random_state=1 )
lowercase_ = xgboost(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
# Error printing
print(F'''Mean Absolute Error : {mean_absolute_error(__lowerCAmelCase , __lowerCAmelCase )}''' )
print(F'''Mean Square Error : {mean_squared_error(__lowerCAmelCase , __lowerCAmelCase )}''' )
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
main()
| 100
| 1
|
"""simple docstring"""
from math import cos, sin, sqrt, tau
from audio_filters.iir_filter import IIRFilter
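# The builders below follow the standard audio-EQ-cookbook biquad designs, in order:
# low-pass, high-pass, band-pass, all-pass, peaking EQ, low-shelf and high-shelf.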
def lowerCamelCase__ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = 1 / sqrt(2 ) ):
'''simple docstring'''
_lowerCAmelCase : Dict = tau * frequency / samplerate
_lowerCAmelCase : Any = sin(_lowerCamelCase )
_lowerCAmelCase : Any = cos(_lowerCamelCase )
_lowerCAmelCase : Tuple = _sin / (2 * q_factor)
_lowerCAmelCase : Dict = (1 - _cos) / 2
_lowerCAmelCase : Optional[int] = 1 - _cos
_lowerCAmelCase : int = 1 + alpha
_lowerCAmelCase : Dict = -2 * _cos
_lowerCAmelCase : Optional[int] = 1 - alpha
_lowerCAmelCase : Tuple = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] )
return filt
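# High-pass biquad: attenuates content below the cut-off frequency.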
def lowerCamelCase__ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = 1 / sqrt(2 ) ):
'''simple docstring'''
_lowerCAmelCase : Tuple = tau * frequency / samplerate
_lowerCAmelCase : Optional[Any] = sin(_lowerCamelCase )
_lowerCAmelCase : List[Any] = cos(_lowerCamelCase )
_lowerCAmelCase : int = _sin / (2 * q_factor)
_lowerCAmelCase : Tuple = (1 + _cos) / 2
_lowerCAmelCase : Dict = -1 - _cos
_lowerCAmelCase : Union[str, Any] = 1 + alpha
_lowerCAmelCase : int = -2 * _cos
_lowerCAmelCase : Tuple = 1 - alpha
_lowerCAmelCase : str = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] )
return filt
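# Band-pass biquad: passes a band centred on the given frequency, width set by q_factor.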
def lowerCamelCase__ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = 1 / sqrt(2 ) ):
'''simple docstring'''
_lowerCAmelCase : Tuple = tau * frequency / samplerate
_lowerCAmelCase : List[Any] = sin(_lowerCamelCase )
_lowerCAmelCase : Optional[int] = cos(_lowerCamelCase )
_lowerCAmelCase : int = _sin / (2 * q_factor)
_lowerCAmelCase : Optional[int] = _sin / 2
_lowerCAmelCase : Optional[int] = 0
_lowerCAmelCase : Optional[int] = -ba
_lowerCAmelCase : str = 1 + alpha
_lowerCAmelCase : Tuple = -2 * _cos
_lowerCAmelCase : List[Any] = 1 - alpha
_lowerCAmelCase : List[Any] = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] )
return filt
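# All-pass biquad: flat magnitude response with a frequency-dependent phase shift.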
def lowerCamelCase__ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = 1 / sqrt(2 ) ):
'''simple docstring'''
_lowerCAmelCase : Dict = tau * frequency / samplerate
_lowerCAmelCase : List[Any] = sin(_lowerCamelCase )
_lowerCAmelCase : Tuple = cos(_lowerCamelCase )
_lowerCAmelCase : str = _sin / (2 * q_factor)
_lowerCAmelCase : Dict = 1 - alpha
_lowerCAmelCase : str = -2 * _cos
_lowerCAmelCase : Optional[int] = 1 + alpha
_lowerCAmelCase : List[str] = IIRFilter(2 )
filt.set_coefficients([ba, ba, ba] , [ba, ba, ba] )
return filt
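# Peaking-EQ biquad: boosts or cuts around the centre frequency by gain_db decibels.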
def lowerCamelCase__ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = 1 / sqrt(2 ) , ):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = tau * frequency / samplerate
_lowerCAmelCase : Any = sin(_lowerCamelCase )
_lowerCAmelCase : str = cos(_lowerCamelCase )
_lowerCAmelCase : Dict = _sin / (2 * q_factor)
_lowerCAmelCase : List[str] = 10 ** (gain_db / 40)
_lowerCAmelCase : List[str] = 1 + alpha * big_a
_lowerCAmelCase : Optional[Any] = -2 * _cos
_lowerCAmelCase : List[str] = 1 - alpha * big_a
_lowerCAmelCase : Optional[Any] = 1 + alpha / big_a
_lowerCAmelCase : Any = -2 * _cos
_lowerCAmelCase : List[str] = 1 - alpha / big_a
_lowerCAmelCase : Any = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] )
return filt
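# Low-shelf biquad: boosts or cuts everything below the corner frequency by gain_db decibels.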
def lowerCamelCase__ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = 1 / sqrt(2 ) , ):
'''simple docstring'''
_lowerCAmelCase : int = tau * frequency / samplerate
_lowerCAmelCase : Union[str, Any] = sin(_lowerCamelCase )
_lowerCAmelCase : Optional[int] = cos(_lowerCamelCase )
_lowerCAmelCase : Union[str, Any] = _sin / (2 * q_factor)
_lowerCAmelCase : List[Any] = 10 ** (gain_db / 40)
_lowerCAmelCase : List[Any] = (big_a + 1) - (big_a - 1) * _cos
_lowerCAmelCase : int = (big_a + 1) + (big_a - 1) * _cos
_lowerCAmelCase : List[str] = (big_a - 1) - (big_a + 1) * _cos
_lowerCAmelCase : List[str] = (big_a - 1) + (big_a + 1) * _cos
_lowerCAmelCase : Dict = 2 * sqrt(_lowerCamelCase ) * alpha
_lowerCAmelCase : Union[str, Any] = big_a * (pmc + aaa)
_lowerCAmelCase : Tuple = 2 * big_a * mpc
_lowerCAmelCase : Optional[Any] = big_a * (pmc - aaa)
_lowerCAmelCase : List[Any] = ppmc + aaa
_lowerCAmelCase : Dict = -2 * pmpc
_lowerCAmelCase : List[str] = ppmc - aaa
_lowerCAmelCase : Union[str, Any] = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] )
return filt
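# High-shelf biquad: boosts or cuts everything above the corner frequency by gain_db decibels.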
def lowerCamelCase__ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = 1 / sqrt(2 ) , ):
'''simple docstring'''
_lowerCAmelCase : List[Any] = tau * frequency / samplerate
_lowerCAmelCase : List[str] = sin(_lowerCamelCase )
_lowerCAmelCase : List[str] = cos(_lowerCamelCase )
_lowerCAmelCase : Union[str, Any] = _sin / (2 * q_factor)
_lowerCAmelCase : List[Any] = 10 ** (gain_db / 40)
_lowerCAmelCase : str = (big_a + 1) - (big_a - 1) * _cos
_lowerCAmelCase : int = (big_a + 1) + (big_a - 1) * _cos
_lowerCAmelCase : Optional[int] = (big_a - 1) - (big_a + 1) * _cos
_lowerCAmelCase : int = (big_a - 1) + (big_a + 1) * _cos
_lowerCAmelCase : int = 2 * sqrt(_lowerCamelCase ) * alpha
_lowerCAmelCase : Optional[int] = big_a * (ppmc + aaa)
_lowerCAmelCase : Tuple = -2 * big_a * pmpc
_lowerCAmelCase : List[Any] = big_a * (ppmc - aaa)
_lowerCAmelCase : Union[str, Any] = pmc + aaa
_lowerCAmelCase : Any = 2 * mpc
_lowerCAmelCase : Any = pmc - aaa
_lowerCAmelCase : List[str] = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] )
return filt
| 259
|
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
_lowerCAmelCase = logging.get_logger(__name__)
_lowerCAmelCase = {
"""microsoft/swin-tiny-patch4-window7-224""": (
"""https://huggingface.co/microsoft/swin-tiny-patch4-window7-224/resolve/main/config.json"""
),
# See all Swin models at https://huggingface.co/models?filter=swin
}
class __UpperCamelCase ( a__ , a__ ):
_UpperCAmelCase = "swin"
_UpperCAmelCase = {
"num_attention_heads": "num_heads",
"num_hidden_layers": "num_layers",
}
def __init__( self ,_A=224 ,_A=4 ,_A=3 ,_A=96 ,_A=[2, 2, 6, 2] ,_A=[3, 6, 12, 24] ,_A=7 ,_A=4.0 ,_A=True ,_A=0.0 ,_A=0.0 ,_A=0.1 ,_A="gelu" ,_A=False ,_A=0.0_2 ,_A=1E-5 ,_A=32 ,_A=None ,_A=None ,**_A ,):
'''simple docstring'''
super().__init__(**_A )
_lowerCAmelCase : List[str] = image_size
_lowerCAmelCase : List[str] = patch_size
_lowerCAmelCase : Union[str, Any] = num_channels
_lowerCAmelCase : Union[str, Any] = embed_dim
_lowerCAmelCase : Dict = depths
_lowerCAmelCase : Any = len(_A )
_lowerCAmelCase : Optional[Any] = num_heads
_lowerCAmelCase : List[Any] = window_size
_lowerCAmelCase : str = mlp_ratio
_lowerCAmelCase : Dict = qkv_bias
_lowerCAmelCase : Union[str, Any] = hidden_dropout_prob
_lowerCAmelCase : Any = attention_probs_dropout_prob
_lowerCAmelCase : List[Any] = drop_path_rate
_lowerCAmelCase : List[Any] = hidden_act
_lowerCAmelCase : Tuple = use_absolute_embeddings
_lowerCAmelCase : Tuple = layer_norm_eps
_lowerCAmelCase : Union[str, Any] = initializer_range
_lowerCAmelCase : Dict = encoder_stride
# we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
_lowerCAmelCase : List[Any] = int(embed_dim * 2 ** (len(_A ) - 1) )
_lowerCAmelCase : List[str] = ['stem'] + [F"""stage{idx}""" for idx in range(1 ,len(_A ) + 1 )]
_lowerCAmelCase, _lowerCAmelCase : Union[str, Any] = get_aligned_output_features_output_indices(
out_features=_A ,out_indices=_A ,stage_names=self.stage_names )
class __UpperCamelCase ( a__ ):
_UpperCAmelCase = version.parse("1.11" )
@property
def __lowerCamelCase ( self ):
'''simple docstring'''
return OrderedDict(
[
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
] )
@property
def __lowerCamelCase ( self ):
'''simple docstring'''
return 1E-4
| 259
| 1
|
import os
import tempfile
import unittest
import uuid
from pathlib import Path
from transformers.testing_utils import get_tests_dir, require_soundfile, require_torch, require_vision
from transformers.tools.agent_types import AgentAudio, AgentImage, AgentText
from transformers.utils import is_soundfile_availble, is_torch_available, is_vision_available
if is_torch_available():
import torch
if is_soundfile_availble():
import soundfile as sf
if is_vision_available():
from PIL import Image
def lowerCAmelCase__ ( a__="" ) ->str:
'''simple docstring'''
_UpperCamelCase = tempfile.mkdtemp()
return os.path.join(a__ , str(uuid.uuida() ) + suffix )
@require_soundfile
@require_torch
class _UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def __UpperCAmelCase ( self : Optional[int]) -> List[str]:
"""simple docstring"""
_UpperCamelCase = torch.rand(12 , dtype=torch.floataa) - 0.5
_UpperCamelCase = AgentAudio(lowercase_)
_UpperCamelCase = str(agent_type.to_string())
# Ensure that the tensor and the agent_type's tensor are the same
self.assertTrue(torch.allclose(lowercase_ , agent_type.to_raw() , atol=1e-4))
del agent_type
# Ensure the path remains even after the object deletion
self.assertTrue(os.path.exists(lowercase_))
# Ensure that the file contains the same value as the original tensor
_UpperCamelCase , _UpperCamelCase = sf.read(lowercase_)
self.assertTrue(torch.allclose(lowercase_ , torch.tensor(lowercase_) , atol=1e-4))
def __UpperCAmelCase ( self : List[Any]) -> str:
"""simple docstring"""
_UpperCamelCase = torch.rand(12 , dtype=torch.floataa) - 0.5
_UpperCamelCase = get_new_path(suffix=".wav")
sf.write(lowercase_ , lowercase_ , 16000)
_UpperCamelCase = AgentAudio(lowercase_)
self.assertTrue(torch.allclose(lowercase_ , agent_type.to_raw() , atol=1e-4))
self.assertEqual(agent_type.to_string() , lowercase_)
@require_vision
@require_torch
class _UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def __UpperCAmelCase ( self : Optional[Any]) -> Tuple:
"""simple docstring"""
_UpperCamelCase = torch.randint(0 , 256 , (64, 64, 3))
_UpperCamelCase = AgentImage(lowercase_)
_UpperCamelCase = str(agent_type.to_string())
# Ensure that the tensor and the agent_type's tensor are the same
self.assertTrue(torch.allclose(lowercase_ , agent_type._tensor , atol=1e-4))
self.assertIsInstance(agent_type.to_raw() , Image.Image)
# Ensure the path remains even after the object deletion
del agent_type
self.assertTrue(os.path.exists(lowercase_))
def __UpperCAmelCase ( self : Union[str, Any]) -> Dict:
"""simple docstring"""
_UpperCamelCase = Path(get_tests_dir("fixtures/tests_samples/COCO")) / "000000039769.png"
_UpperCamelCase = Image.open(lowercase_)
_UpperCamelCase = AgentImage(lowercase_)
self.assertTrue(path.samefile(agent_type.to_string()))
self.assertTrue(image == agent_type.to_raw())
# Ensure the path remains even after the object deletion
del agent_type
self.assertTrue(os.path.exists(lowercase_))
def __UpperCAmelCase ( self : List[Any]) -> str:
"""simple docstring"""
_UpperCamelCase = Path(get_tests_dir("fixtures/tests_samples/COCO")) / "000000039769.png"
_UpperCamelCase = Image.open(lowercase_)
_UpperCamelCase = AgentImage(lowercase_)
self.assertFalse(path.samefile(agent_type.to_string()))
self.assertTrue(image == agent_type.to_raw())
# Ensure the path remains even after the object deletion
del agent_type
self.assertTrue(os.path.exists(lowercase_))
class _UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def __UpperCAmelCase ( self : Any) -> Any:
"""simple docstring"""
_UpperCamelCase = "Hey!"
_UpperCamelCase = AgentText(lowercase_)
self.assertEqual(lowercase_ , agent_type.to_string())
self.assertEqual(lowercase_ , agent_type.to_raw())
self.assertEqual(lowercase_ , lowercase_)
| 82
|
import json
import os
import tempfile
import datasets
from utils import generate_example_dataset, get_duration
lowerCamelCase__ = 5_0000
lowerCamelCase__ = 5000
lowerCamelCase__,lowerCamelCase__ = os.path.split(__file__)
lowerCamelCase__ = os.path.join(RESULTS_BASEPATH, '''results''', RESULTS_FILENAME.replace('''.py''', '''.json'''))
@get_duration
def lowerCAmelCase__ ( a__ , a__ ) ->int:
'''simple docstring'''
for i in range(a__ ):
_UpperCamelCase = dataset[i]
@get_duration
def lowerCAmelCase__ ( a__ , a__ , a__ ) ->int:
'''simple docstring'''
for i in range(0 , len(a__ ) , a__ ):
_UpperCamelCase = dataset[i : i + batch_size]
@get_duration
def lowerCAmelCase__ ( a__ , a__ , a__ ) ->Union[str, Any]:
'''simple docstring'''
with dataset.formatted_as(type=a__ ):
for i in range(a__ ):
_UpperCamelCase = dataset[i]
@get_duration
def lowerCAmelCase__ ( a__ , a__ , a__ , a__ ) ->Dict:
'''simple docstring'''
with dataset.formatted_as(type=a__ ):
for i in range(0 , a__ , a__ ):
_UpperCamelCase = dataset[i : i + batch_size]
def lowerCAmelCase__ ( ) ->Dict:
'''simple docstring'''
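    # Time sequential reads, batched reads and format-converted reads (numpy, pandas,
    # torch, tensorflow) over a generated Arrow dataset, repeat a subset of the
    # measurements after shuffling, and dump all timings to a JSON results file.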
_UpperCamelCase = {"num examples": SPEED_TEST_N_EXAMPLES}
_UpperCamelCase = [
(read, {"length": SMALL_TEST}),
(read, {"length": SPEED_TEST_N_EXAMPLES}),
(read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 10}),
(read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 100}),
(read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 1_000}),
(read_formatted, {"type": "numpy", "length": SMALL_TEST}),
(read_formatted, {"type": "pandas", "length": SMALL_TEST}),
(read_formatted, {"type": "torch", "length": SMALL_TEST}),
(read_formatted, {"type": "tensorflow", "length": SMALL_TEST}),
(read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 10}),
(read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 1_000}),
]
_UpperCamelCase = [
(read, {"length": SMALL_TEST}),
(read, {"length": SPEED_TEST_N_EXAMPLES}),
(read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 10}),
(read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 100}),
(read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 1_000}),
(read_formatted, {"type": "numpy", "length": SMALL_TEST}),
(read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 10}),
(read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 1_000}),
]
with tempfile.TemporaryDirectory() as tmp_dir:
print("generating dataset" )
_UpperCamelCase = datasets.Features(
{"list": datasets.Sequence(datasets.Value("float32" ) ), "numbers": datasets.Value("float32" )} )
_UpperCamelCase = generate_example_dataset(
os.path.join(a__ , "dataset.arrow" ) , a__ , num_examples=a__ , seq_shapes={"list": (100,)} , )
print("first set of iterations" )
for func, kwargs in functions:
print(func.__name__ , str(a__ ) )
_UpperCamelCase = func(a__ , **a__ )
print("shuffling dataset" )
_UpperCamelCase = dataset.shuffle()
print("Second set of iterations (after shuffling" )
for func, kwargs in functions_shuffled:
print("shuffled " , func.__name__ , str(a__ ) )
_UpperCamelCase = func(
a__ , **a__ )
with open(a__ , "wb" ) as f:
f.write(json.dumps(a__ ).encode("utf-8" ) )
if __name__ == "__main__": # useful to run the profiler
benchmark_iterating()
| 82
| 1
|
def jaccard_similarity( set_a , set_b , alternative_union=False ):
    if isinstance(set_a , set ) and isinstance(set_b , set ):
        intersection = len(set_a.intersection(set_b ) )
        if alternative_union:
            union = len(set_a ) + len(set_b )
        else:
            union = len(set_a.union(set_b ) )
        return intersection / union
    if isinstance(set_a , (list, tuple) ) and isinstance(set_b , (list, tuple) ):
        intersection = [element for element in set_a if element in set_b]
        if alternative_union:
            union = len(set_a ) + len(set_b )
            return len(intersection ) / union
        else:
            union = set_a + [element for element in set_b if element not in set_a]
            return len(intersection ) / len(union )
    return None
if __name__ == "__main__":
    set_a = {"""a""", """b""", """c""", """d""", """e"""}
    set_b = {"""c""", """d""", """e""", """f""", """h""", """i"""}
    print(jaccard_similarity(set_a, set_b))
| 37
|
def merge_sort( collection ):
    start , end = [], []
    while len(collection ) > 1:
        minimum , maximum = min(collection ), max(collection )
        start.append(minimum )
        end.append(maximum )
        collection.remove(minimum )
        collection.remove(maximum )
    end.reverse()
    return start + collection + end
if __name__ == "__main__":
    user_input = input("""Enter numbers separated by a comma:\n""").strip()
    unsorted = [int(item) for item in user_input.split(""",""")]
    print(*merge_sort(unsorted), sep=""",""")
| 113
| 0
|
from datetime import datetime as dt
import os
from github import Github
_snake_case = [
"good first issue",
"good second issue",
"good difficult issue",
"feature request",
"new model",
"wip",
]
def main( ):
_A : int = Github(os.environ["""GITHUB_TOKEN"""] )
_A : Tuple = g.get_repo("""huggingface/transformers""" )
_A : Dict = repo.get_issues(state="""open""" )
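    # An issue is closed when the bot already left the last comment, it has been inactive
    # for more than 7 days and is at least 30 days old; otherwise a stale warning is
    # posted after 23 days of inactivity. Issues with an exempt label are always skipped.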
for issue in open_issues:
_A : Optional[Any] = sorted([comment for comment in issue.get_comments()],key=lambda snake_case_ : i.created_at,reverse=snake_case_ )
_A : Dict = comments[0] if len(snake_case_ ) > 0 else None
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and (dt.utcnow() - issue.updated_at).days > 7
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# print(f"Would close issue {issue.number} since it has been 7 days of inactivity since bot mention.")
issue.edit(state="""closed""" )
elif (
(dt.utcnow() - issue.updated_at).days > 23
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# print(f"Would add stale comment to {issue.number}")
issue.create_comment(
"""This issue has been automatically marked as stale because it has not had """
"""recent activity. If you think this still needs to be addressed """
"""please comment on this thread.\n\nPlease note that issues that do not follow the """
"""[contributing guidelines](https://github.com/huggingface/transformers/blob/main/CONTRIBUTING.md) """
"""are likely to be ignored.""" )
if __name__ == "__main__":
main()
| 54
|
import unittest
import numpy as np
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DPTImageProcessor
class lowercase ( unittest.TestCase ):
def __init__( self , _a , _a=7 , _a=3 , _a=18 , _a=30 , _a=400 , _a=True , _a=None , _a=True , _a=[0.5, 0.5, 0.5] , _a=[0.5, 0.5, 0.5] , ) -> Tuple:
_A : Any = size if size is not None else {"""height""": 18, """width""": 18}
_A : Optional[Any] = parent
_A : Union[str, Any] = batch_size
_A : List[Any] = num_channels
_A : List[str] = image_size
_A : Optional[Any] = min_resolution
_A : List[Any] = max_resolution
_A : Optional[Any] = do_resize
_A : str = size
_A : List[str] = do_normalize
_A : Dict = image_mean
_A : int = image_std
def a__ ( self ) -> Any:
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
}
@require_torch
@require_vision
class lowercase ( UpperCamelCase__,unittest.TestCase ):
_a = DPTImageProcessor if is_vision_available() else None
def a__ ( self ) -> Optional[int]:
_A : Optional[Any] = DPTImageProcessingTester(self )
@property
def a__ ( self ) -> Tuple:
return self.image_processor_tester.prepare_image_processor_dict()
def a__ ( self ) -> Any:
_A : List[str] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_a , """image_mean""" ) )
self.assertTrue(hasattr(_a , """image_std""" ) )
self.assertTrue(hasattr(_a , """do_normalize""" ) )
self.assertTrue(hasattr(_a , """do_resize""" ) )
self.assertTrue(hasattr(_a , """size""" ) )
def a__ ( self ) -> Any:
_A : List[str] = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"""height""": 18, """width""": 18} )
_A : str = self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
self.assertEqual(image_processor.size , {"""height""": 42, """width""": 42} )
def a__ ( self ) -> List[Any]:
# Initialize image_processing
_A : Optional[int] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
_A : Any = prepare_image_inputs(self.image_processor_tester , equal_resolution=_a )
for image in image_inputs:
self.assertIsInstance(_a , Image.Image )
# Test not batched input
_A : Union[str, Any] = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
# Test batched
_A : int = image_processing(_a , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
def a__ ( self ) -> Union[str, Any]:
# Initialize image_processing
_A : List[Any] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
_A : Optional[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_a , numpify=_a )
for image in image_inputs:
self.assertIsInstance(_a , np.ndarray )
# Test not batched input
_A : Dict = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
# Test batched
_A : Any = image_processing(_a , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
def a__ ( self ) -> List[str]:
# Initialize image_processing
_A : Dict = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
_A : str = prepare_image_inputs(self.image_processor_tester , equal_resolution=_a , torchify=_a )
for image in image_inputs:
self.assertIsInstance(_a , torch.Tensor )
# Test not batched input
_A : Optional[Any] = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
# Test batched
_A : int = image_processing(_a , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
| 54
| 1
|
"""simple docstring"""
import argparse
import requests
import torch
from PIL import Image
from transformers import SwinConfig, SwinForMaskedImageModeling, ViTImageProcessor
def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase ) -> List[Any]:
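    # SimMIM checkpoints are pre-trained at 192x192: the base variant uses window size 6
    # and embed dim 128, the large variant window size 12 and embed dim 192.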
lowercase__: Optional[Any] = SwinConfig(image_size=1_9_2 )
if "base" in model_name:
lowercase__: str = 6
lowercase__: Tuple = 1_2_8
lowercase__: Any = (2, 2, 1_8, 2)
lowercase__: Tuple = (4, 8, 1_6, 3_2)
elif "large" in model_name:
lowercase__: Dict = 1_2
lowercase__: int = 1_9_2
lowercase__: Any = (2, 2, 1_8, 2)
lowercase__: Union[str, Any] = (6, 1_2, 2_4, 4_8)
else:
raise ValueError('''Model not supported, only supports base and large variants''' )
lowercase__: Any = window_size
lowercase__: Tuple = embed_dim
lowercase__: Any = depths
lowercase__: List[str] = num_heads
return config
def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase ) -> List[str]:
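    # Map the original SimMIM encoder parameter names onto the HF
    # SwinForMaskedImageModeling layout; decoder weights keep their original names.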
if "encoder.mask_token" in name:
lowercase__: Tuple = name.replace('''encoder.mask_token''' , '''embeddings.mask_token''' )
if "encoder.patch_embed.proj" in name:
lowercase__: int = name.replace('''encoder.patch_embed.proj''' , '''embeddings.patch_embeddings.projection''' )
if "encoder.patch_embed.norm" in name:
lowercase__: Union[str, Any] = name.replace('''encoder.patch_embed.norm''' , '''embeddings.norm''' )
if "attn.proj" in name:
lowercase__: Optional[Any] = name.replace('''attn.proj''' , '''attention.output.dense''' )
if "attn" in name:
lowercase__: Tuple = name.replace('''attn''' , '''attention.self''' )
if "norm1" in name:
lowercase__: int = name.replace('''norm1''' , '''layernorm_before''' )
if "norm2" in name:
lowercase__: Optional[Any] = name.replace('''norm2''' , '''layernorm_after''' )
if "mlp.fc1" in name:
lowercase__: Tuple = name.replace('''mlp.fc1''' , '''intermediate.dense''' )
if "mlp.fc2" in name:
lowercase__: Dict = name.replace('''mlp.fc2''' , '''output.dense''' )
if name == "encoder.norm.weight":
lowercase__: List[Any] = '''layernorm.weight'''
if name == "encoder.norm.bias":
lowercase__: Tuple = '''layernorm.bias'''
if "decoder" in name:
pass
else:
lowercase__: str = '''swin.''' + name
return name
def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase , __UpperCAmelCase ) -> Dict:
for key in orig_state_dict.copy().keys():
lowercase__: Optional[Any] = orig_state_dict.pop(__UpperCAmelCase )
if "attn_mask" in key:
pass
elif "qkv" in key:
lowercase__: Optional[Any] = key.split('''.''' )
lowercase__: Dict = int(key_split[2] )
lowercase__: List[str] = int(key_split[4] )
lowercase__: Dict = model.swin.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
if "weight" in key:
lowercase__: Any = val[:dim, :]
lowercase__: Any = val[
dim : dim * 2, :
]
lowercase__: str = val[-dim:, :]
else:
lowercase__: Any = val[
:dim
]
lowercase__: Union[str, Any] = val[
dim : dim * 2
]
lowercase__: Dict = val[
-dim:
]
else:
lowercase__: str = val
return orig_state_dict
def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> List[Any]:
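    # Load the original state dict, rename its keys to the HF layout, run the converted
    # model on a sample COCO image as a sanity check, then optionally save and push both
    # the model and the image processor.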
lowercase__: str = torch.load(__UpperCAmelCase , map_location='''cpu''' )['''model''']
lowercase__: List[str] = get_swin_config(__UpperCAmelCase )
lowercase__: List[Any] = SwinForMaskedImageModeling(__UpperCAmelCase )
model.eval()
lowercase__: Any = convert_state_dict(__UpperCAmelCase , __UpperCAmelCase )
model.load_state_dict(__UpperCAmelCase )
lowercase__: Any = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
lowercase__: List[Any] = ViTImageProcessor(size={'''height''': 1_9_2, '''width''': 1_9_2} )
lowercase__: int = Image.open(requests.get(__UpperCAmelCase , stream=__UpperCAmelCase ).raw )
lowercase__: List[str] = image_processor(images=__UpperCAmelCase , return_tensors='''pt''' )
with torch.no_grad():
lowercase__: Optional[Any] = model(**__UpperCAmelCase ).logits
print(outputs.keys() )
print('''Looks ok!''' )
if pytorch_dump_folder_path is not None:
print(F"""Saving model {model_name} to {pytorch_dump_folder_path}""" )
model.save_pretrained(__UpperCAmelCase )
print(F"""Saving image processor to {pytorch_dump_folder_path}""" )
image_processor.save_pretrained(__UpperCAmelCase )
if push_to_hub:
print(F"""Pushing model and image processor for {model_name} to hub""" )
model.push_to_hub(F"""microsoft/{model_name}""" )
image_processor.push_to_hub(F"""microsoft/{model_name}""" )
if __name__ == "__main__":
__A = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="swin-base-simmim-window6-192",
type=str,
choices=["swin-base-simmim-window6-192", "swin-large-simmim-window12-192"],
help="Name of the Swin SimMIM model you'd like to convert.",
)
parser.add_argument(
"--checkpoint_path",
default="/Users/nielsrogge/Documents/SwinSimMIM/simmim_pretrain__swin_base__img192_window6__100ep.pth",
type=str,
help="Path to the original PyTorch checkpoint (.pth file).",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
)
__A = parser.parse_args()
convert_swin_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
| 586
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__A = logging.get_logger(__name__)
__A = {
"caidas/swin2sr-classicalsr-x2-64": (
"https://huggingface.co/caidas/swin2sr-classicalsr-x2-64/resolve/main/config.json"
),
}
class UpperCAmelCase (_UpperCAmelCase ):
"""simple docstring"""
_UpperCAmelCase :Any = "swin2sr"
_UpperCAmelCase :Optional[int] = {
"hidden_size": "embed_dim",
"num_attention_heads": "num_heads",
"num_hidden_layers": "num_layers",
}
def __init__( self , _UpperCAmelCase=64 , _UpperCAmelCase=1 , _UpperCAmelCase=3 , _UpperCAmelCase=180 , _UpperCAmelCase=[6, 6, 6, 6, 6, 6] , _UpperCAmelCase=[6, 6, 6, 6, 6, 6] , _UpperCAmelCase=8 , _UpperCAmelCase=2.0 , _UpperCAmelCase=True , _UpperCAmelCase=0.0 , _UpperCAmelCase=0.0 , _UpperCAmelCase=0.1 , _UpperCAmelCase="gelu" , _UpperCAmelCase=False , _UpperCAmelCase=0.02 , _UpperCAmelCase=1e-5 , _UpperCAmelCase=2 , _UpperCAmelCase=1.0 , _UpperCAmelCase="1conv" , _UpperCAmelCase="pixelshuffle" , **_UpperCAmelCase , ):
super().__init__(**_UpperCAmelCase )
lowercase__: Union[str, Any] = image_size
lowercase__: List[Any] = patch_size
lowercase__: Union[str, Any] = num_channels
lowercase__: List[str] = embed_dim
lowercase__: Optional[int] = depths
lowercase__: Union[str, Any] = len(_UpperCAmelCase )
lowercase__: Union[str, Any] = num_heads
lowercase__: str = window_size
lowercase__: List[Any] = mlp_ratio
lowercase__: List[Any] = qkv_bias
lowercase__: List[str] = hidden_dropout_prob
lowercase__: Optional[Any] = attention_probs_dropout_prob
lowercase__: Any = drop_path_rate
lowercase__: str = hidden_act
lowercase__: List[str] = use_absolute_embeddings
lowercase__: Tuple = layer_norm_eps
lowercase__: Optional[int] = initializer_range
lowercase__: Tuple = upscale
lowercase__: Optional[int] = img_range
lowercase__: List[Any] = resi_connection
lowercase__: Union[str, Any] = upsampler
| 586
| 1
|
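# Ideal gas law helpers (PV = nRT with R ~ 0.0821 L*atm/(mol*K)): normality from
# molarity and an n-factor, then pressure, volume and temperature of a gas system.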
def molarity_to_normality(nfactor , moles , volume ) -> float:
    return round(float(moles / volume ) * nfactor )
def moles_to_pressure(volume , moles , temperature ) -> float:
    return round(float((moles * 0.0_8_2_1 * temperature) / (volume) ) )
def moles_to_volume(pressure , moles , temperature ) -> float:
    return round(float((moles * 0.0_8_2_1 * temperature) / (pressure) ) )
def pressure_and_volume_to_temperature(pressure , moles , volume ) -> float:
    return round(float((pressure * volume) / (0.0_8_2_1 * moles) ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 45
|
import os
import zipfile
import requests
from get_ci_error_statistics import download_artifact, get_artifacts_links
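# Helpers that locate the most recent completed run of the scheduled (daily) CI workflow
# on the main branch, download its artifacts, and read the files inside each zip.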
def __UpperCamelCase (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=7 ) -> List[Any]:
lowercase__ = None
if token is not None:
lowercase__ = {'Accept': 'application/vnd.github+json', 'Authorization': F"""Bearer {token}"""}
# The id of a workflow (not of a workflow run)
lowercase__ = '636036'
lowercase__ = F"""https://api.github.com/repos/huggingface/transformers/actions/workflows/{workflow_id}/runs"""
# On `main` branch + event being `schedule` + not returning PRs + only `num_runs` results
url += F"""?branch=main&event=schedule&exclude_pull_requests=true&per_page={num_runs}"""
lowercase__ = requests.get(_SCREAMING_SNAKE_CASE , headers=_SCREAMING_SNAKE_CASE ).json()
return result["workflow_runs"]
def __UpperCamelCase (_SCREAMING_SNAKE_CASE ) -> Union[str, Any]:
lowercase__ = get_daily_ci_runs(_SCREAMING_SNAKE_CASE )
lowercase__ = None
for workflow_run in workflow_runs:
if workflow_run["status"] == "completed":
lowercase__ = workflow_run['id']
break
return workflow_run_id
def __UpperCamelCase (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Tuple:
lowercase__ = get_last_daily_ci_runs(_SCREAMING_SNAKE_CASE )
if workflow_run_id is not None:
lowercase__ = get_artifacts_links(worflow_run_id=_SCREAMING_SNAKE_CASE , token=_SCREAMING_SNAKE_CASE )
for artifact_name in artifact_names:
if artifact_name in artifacts_links:
lowercase__ = artifacts_links[artifact_name]
download_artifact(
artifact_name=_SCREAMING_SNAKE_CASE , artifact_url=_SCREAMING_SNAKE_CASE , output_dir=_SCREAMING_SNAKE_CASE , token=_SCREAMING_SNAKE_CASE )
def __UpperCamelCase (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Dict:
get_last_daily_ci_artifacts(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
lowercase__ = {}
for artifact_name in artifact_names:
lowercase__ = os.path.join(_SCREAMING_SNAKE_CASE , F"""{artifact_name}.zip""" )
if os.path.isfile(_SCREAMING_SNAKE_CASE ):
lowercase__ = {}
with zipfile.ZipFile(_SCREAMING_SNAKE_CASE ) as z:
for filename in z.namelist():
if not os.path.isdir(_SCREAMING_SNAKE_CASE ):
# read the file
with z.open(_SCREAMING_SNAKE_CASE ) as f:
lowercase__ = f.read().decode('UTF-8' )
return results
| 45
| 1
|
import logging
import os
import sys
import warnings
from dataclasses import dataclass, field
from random import randint
from typing import Optional
import datasets
import evaluate
import numpy as np
from datasets import DatasetDict, load_dataset
import transformers
from transformers import (
AutoConfig,
AutoFeatureExtractor,
AutoModelForAudioClassification,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
SCREAMING_SNAKE_CASE__ = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('''4.31.0''')
require_version('''datasets>=1.14.0''', '''To fix: pip install -r examples/pytorch/audio-classification/requirements.txt''')
def random_subsample(wav , max_length , sample_rate = 16_000 ):
    """Randomly cut `wav` down to a window of at most `max_length` seconds (the name is
    reconstructed from the call site in the training transforms below)."""
    sample_length = int(round(sample_rate * max_length ) )
    if len(wav ) <= sample_length:
        return wav
    random_offset = randint(0 , len(wav ) - sample_length - 1 )
    return wav[random_offset : random_offset + sample_length]
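# Example (hedged, illustrative numbers): with sample_rate=16_000 and max_length=2.0 the window is
# 32_000 samples, so a 5-second clip (80_000 samples) is reduced to a random 32_000-sample slice,
# while shorter clips are returned unchanged.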
@dataclass
class __lowerCAmelCase :
"""simple docstring"""
A__ : Optional[str] = field(default=UpperCAmelCase_ , metadata={"help": "Name of a dataset from the datasets package"} )
A__ : Optional[str] = field(
default=UpperCAmelCase_ , metadata={"help": "The configuration name of the dataset to use (via the datasets library)."} )
A__ : Optional[str] = field(
default=UpperCAmelCase_ , metadata={"help": "A file containing the training audio paths and labels."} )
A__ : Optional[str] = field(
default=UpperCAmelCase_ , metadata={"help": "A file containing the validation audio paths and labels."} )
A__ : str = field(
default="train" , metadata={
"help": "The name of the training data set split to use (via the datasets library). Defaults to 'train'"
} , )
A__ : str = field(
default="validation" , metadata={
"help": (
"The name of the training data set split to use (via the datasets library). Defaults to 'validation'"
)
} , )
A__ : str = field(
default="audio" , metadata={"help": "The name of the dataset column containing the audio data. Defaults to 'audio'"} , )
A__ : str = field(
default="label" , metadata={"help": "The name of the dataset column containing the labels. Defaults to 'label'"} )
A__ : Optional[int] = field(
default=UpperCAmelCase_ , metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of training examples to this "
"value if set."
)
} , )
A__ : Optional[int] = field(
default=UpperCAmelCase_ , metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of evaluation examples to this "
"value if set."
)
} , )
A__ : float = field(
default=20 , metadata={"help": "Audio clips will be randomly cut to this length during training if the value is set."} , )
@dataclass
class __lowerCAmelCase :
"""simple docstring"""
A__ : str = field(
default="facebook/wav2vec2-base" , metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} , )
A__ : Optional[str] = field(
default=UpperCAmelCase_ , metadata={"help": "Pretrained config name or path if not the same as model_name"} )
A__ : Optional[str] = field(
default=UpperCAmelCase_ , metadata={"help": "Where do you want to store the pretrained models downloaded from the Hub"} )
A__ : str = field(
default="main" , metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."} , )
A__ : Optional[str] = field(
default=UpperCAmelCase_ , metadata={"help": "Name or path of preprocessor config."} )
A__ : bool = field(
default=UpperCAmelCase_ , metadata={"help": "Whether to freeze the feature encoder layers of the model."} )
A__ : bool = field(
default=UpperCAmelCase_ , metadata={"help": "Whether to generate an attention mask in the feature extractor."} )
A__ : bool = field(
default=UpperCAmelCase_ , metadata={
"help": (
"Will use the token generated when running `huggingface-cli login` (necessary to use this script "
"with private models)."
)
} , )
A__ : Optional[bool] = field(
default=UpperCAmelCase_ , metadata={"help": "Whether to freeze the feature extractor layers of the model."} )
A__ : bool = field(
default=UpperCAmelCase_ , metadata={"help": "Will enable to load a pretrained model whose head dimensions are different."} , )
def _a ( self : Dict ):
"""simple docstring"""
if not self.freeze_feature_extractor and self.freeze_feature_encoder:
warnings.warn(
'The argument `--freeze_feature_extractor` is deprecated and '
'will be removed in a future version. Use `--freeze_feature_encoder`'
'instead. Setting `freeze_feature_encoder==True`.' , _snake_case , )
if self.freeze_feature_extractor and not self.freeze_feature_encoder:
raise ValueError(
'The argument `--freeze_feature_extractor` is deprecated and '
'should not be used in combination with `--freeze_feature_encoder`.'
'Only make use of `--freeze_feature_encoder`.' )
def A ( ) -> List[str]:
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
A__ = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('.json' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
A__ , A__ , A__ = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
A__ , A__ , A__ = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry('run_audio_classification' , __UpperCamelCase , __UpperCamelCase )
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
A__ = training_args.get_process_log_level()
logger.setLevel(__UpperCamelCase )
transformers.utils.logging.set_verbosity(__UpperCamelCase )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
f'''Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu} '''
+ f'''distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}''' )
logger.info(f'''Training/evaluation parameters {training_args}''' )
# Set seed before initializing model.
set_seed(training_args.seed )
# Detecting last checkpoint.
A__ = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
A__ = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
f'''Output directory ({training_args.output_dir}) already exists and is not empty. '''
'Use --overwrite_output_dir to train from scratch.' )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
f'''Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '''
'the `--output_dir` or add `--overwrite_output_dir` to train from scratch.' )
# Initialize our dataset and prepare it for the audio classification task.
A__ = DatasetDict()
A__ = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=data_args.train_split_name , use_auth_token=True if model_args.use_auth_token else None , )
A__ = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=data_args.eval_split_name , use_auth_token=True if model_args.use_auth_token else None , )
if data_args.audio_column_name not in raw_datasets["train"].column_names:
raise ValueError(
f'''--audio_column_name {data_args.audio_column_name} not found in dataset \'{data_args.dataset_name}\'. '''
'Make sure to set `--audio_column_name` to the correct audio column - one of '
f'''{", ".join(raw_datasets["train"].column_names )}.''' )
if data_args.label_column_name not in raw_datasets["train"].column_names:
raise ValueError(
f'''--label_column_name {data_args.label_column_name} not found in dataset \'{data_args.dataset_name}\'. '''
'Make sure to set `--label_column_name` to the correct text column - one of '
f'''{", ".join(raw_datasets["train"].column_names )}.''' )
# Setting `return_attention_mask=True` is the way to get a correctly masked mean-pooling over
# transformer outputs in the classifier, but it doesn't always lead to better accuracy
A__ = AutoFeatureExtractor.from_pretrained(
model_args.feature_extractor_name or model_args.model_name_or_path , return_attention_mask=model_args.attention_mask , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# `datasets` takes care of automatically loading and resampling the audio,
# so we just need to set the correct target sampling rate.
A__ = raw_datasets.cast_column(
data_args.audio_column_name , datasets.features.Audio(sampling_rate=feature_extractor.sampling_rate ) )
A__ = feature_extractor.model_input_names[0]
def train_transforms(__UpperCamelCase ):
A__ = []
for audio in batch[data_args.audio_column_name]:
A__ = random_subsample(
audio['array'] , max_length=data_args.max_length_seconds , sample_rate=feature_extractor.sampling_rate )
subsampled_wavs.append(__UpperCamelCase )
A__ = feature_extractor(__UpperCamelCase , sampling_rate=feature_extractor.sampling_rate )
A__ = {model_input_name: inputs.get(__UpperCamelCase )}
A__ = list(batch[data_args.label_column_name] )
return output_batch
def val_transforms(__UpperCamelCase ):
A__ = [audio['array'] for audio in batch[data_args.audio_column_name]]
A__ = feature_extractor(__UpperCamelCase , sampling_rate=feature_extractor.sampling_rate )
A__ = {model_input_name: inputs.get(__UpperCamelCase )}
A__ = list(batch[data_args.label_column_name] )
return output_batch
# Prepare label mappings.
# We'll include these in the model's config to get human readable labels in the Inference API.
A__ = raw_datasets['train'].features[data_args.label_column_name].names
A__ , A__ = {}, {}
for i, label in enumerate(__UpperCamelCase ):
A__ = str(__UpperCamelCase )
A__ = label
# Load the accuracy metric from the datasets package
A__ = evaluate.load('accuracy' )
# Define our compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with
# `predictions` and `label_ids` fields) and has to return a dictionary string to float.
def compute_metrics(__UpperCamelCase ):
A__ = np.argmax(eval_pred.predictions , axis=1 )
return metric.compute(predictions=__UpperCamelCase , references=eval_pred.label_ids )
A__ = AutoConfig.from_pretrained(
model_args.config_name or model_args.model_name_or_path , num_labels=len(__UpperCamelCase ) , labelaid=__UpperCamelCase , idalabel=__UpperCamelCase , finetuning_task='audio-classification' , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
A__ = AutoModelForAudioClassification.from_pretrained(
model_args.model_name_or_path , from_tf=bool('.ckpt' in model_args.model_name_or_path ) , config=__UpperCamelCase , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ignore_mismatched_sizes=model_args.ignore_mismatched_sizes , )
# freeze the convolutional waveform encoder
if model_args.freeze_feature_encoder:
model.freeze_feature_encoder()
if training_args.do_train:
if data_args.max_train_samples is not None:
A__ = (
raw_datasets['train'].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) )
)
# Set the training transforms
raw_datasets["train"].set_transform(__UpperCamelCase , output_all_columns=__UpperCamelCase )
if training_args.do_eval:
if data_args.max_eval_samples is not None:
A__ = (
raw_datasets['eval'].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) )
)
# Set the validation transforms
raw_datasets["eval"].set_transform(__UpperCamelCase , output_all_columns=__UpperCamelCase )
# Initialize our trainer
A__ = Trainer(
model=__UpperCamelCase , args=__UpperCamelCase , train_dataset=raw_datasets['train'] if training_args.do_train else None , eval_dataset=raw_datasets['eval'] if training_args.do_eval else None , compute_metrics=__UpperCamelCase , tokenizer=__UpperCamelCase , )
# Training
if training_args.do_train:
A__ = None
if training_args.resume_from_checkpoint is not None:
A__ = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
A__ = last_checkpoint
A__ = trainer.train(resume_from_checkpoint=__UpperCamelCase )
trainer.save_model()
trainer.log_metrics('train' , train_result.metrics )
trainer.save_metrics('train' , train_result.metrics )
trainer.save_state()
# Evaluation
if training_args.do_eval:
A__ = trainer.evaluate()
trainer.log_metrics('eval' , __UpperCamelCase )
trainer.save_metrics('eval' , __UpperCamelCase )
# Write model card and (optionally) push to hub
A__ = {
'finetuned_from': model_args.model_name_or_path,
'tasks': 'audio-classification',
'dataset': data_args.dataset_name,
'tags': ['audio-classification'],
}
if training_args.push_to_hub:
trainer.push_to_hub(**__UpperCamelCase )
else:
trainer.create_model_card(**__UpperCamelCase )
if __name__ == "__main__":
main()
| 9
|
import doctest
import glob
import importlib
import inspect
import os
import re
from contextlib import contextmanager
from functools import wraps
from unittest.mock import patch
import numpy as np
import pytest
from absl.testing import parameterized
import datasets
from datasets import load_metric
from .utils import for_all_test_methods, local, slow
# mark all tests as integration
__a : Optional[int] = pytest.mark.integration
__a : List[str] = {'''comet'''}
__a : Union[str, Any] = importlib.util.find_spec('''fairseq''') is not None
__a : Tuple = {'''code_eval'''}
__a : Any = os.name == '''nt'''
__a : Dict = {'''bertscore''', '''frugalscore''', '''perplexity'''}
__a : Optional[int] = importlib.util.find_spec('''transformers''') is not None
def snake_case_ ( SCREAMING_SNAKE_CASE_ ) -> List[str]:
@wraps(SCREAMING_SNAKE_CASE_ )
def wrapper(self ,SCREAMING_SNAKE_CASE_ ):
if not _has_fairseq and metric_name in REQUIRE_FAIRSEQ:
self.skipTest("\"test requires Fairseq\"" )
else:
test_case(self ,SCREAMING_SNAKE_CASE_ )
return wrapper
def snake_case_ ( SCREAMING_SNAKE_CASE_ ) -> int:
@wraps(SCREAMING_SNAKE_CASE_ )
def wrapper(self ,SCREAMING_SNAKE_CASE_ ):
if not _has_transformers and metric_name in REQUIRE_TRANSFORMERS:
self.skipTest("\"test requires transformers\"" )
else:
test_case(self ,SCREAMING_SNAKE_CASE_ )
return wrapper
def snake_case_ ( SCREAMING_SNAKE_CASE_ ) -> List[Any]:
@wraps(SCREAMING_SNAKE_CASE_ )
def wrapper(self ,SCREAMING_SNAKE_CASE_ ):
if _on_windows and metric_name in UNSUPPORTED_ON_WINDOWS:
self.skipTest("\"test not supported on Windows\"" )
else:
test_case(self ,SCREAMING_SNAKE_CASE_ )
return wrapper
def snake_case_ ( ) -> str:
lowercase__ : Optional[int] = [metric_dir.split(os.sep )[-2] for metric_dir in glob.glob("./metrics/*/" )]
return [{"testcase_name": x, "metric_name": x} for x in metrics if x != "gleu"] # gleu is unfinished
@parameterized.named_parameters(get_local_metric_names() )
@for_all_test_methods(
snake_case_ , snake_case_ , snake_case_ )
@local
class UpperCAmelCase( parameterized.TestCase ):
"""simple docstring"""
a : List[str] = {}
a : List[Any] = None
@pytest.mark.filterwarnings("ignore:metric_module_factory is deprecated:FutureWarning" )
@pytest.mark.filterwarnings("ignore:load_metric is deprecated:FutureWarning" )
def __a ( self , lowerCamelCase ) -> Any:
"""simple docstring"""
lowercase__ : int = "[...]"
lowercase__ : Any = importlib.import_module(
datasets.load.metric_module_factory(os.path.join("metrics" , lowerCamelCase ) ).module_path )
lowercase__ : List[Any] = datasets.load.import_main_class(metric_module.__name__ , dataset=lowerCamelCase )
# check parameters
lowercase__ : int = inspect.signature(metric._compute ).parameters
self.assertTrue(all(p.kind != p.VAR_KEYWORD for p in parameters.values() ) ) # no **kwargs
# run doctest
with self.patch_intensive_calls(lowerCamelCase , metric_module.__name__ ):
with self.use_local_metrics():
try:
lowercase__ : str = doctest.testmod(lowerCamelCase , verbose=lowerCamelCase , raise_on_error=lowerCamelCase )
except doctest.UnexpectedException as e:
raise e.exc_info[1] # raise the exception that doctest caught
self.assertEqual(results.failed , 0 )
self.assertGreater(results.attempted , 1 )
@slow
def __a ( self , lowerCamelCase ) -> Optional[Any]:
"""simple docstring"""
lowercase__ : Optional[Any] = "[...]"
lowercase__ : List[Any] = importlib.import_module(
datasets.load.metric_module_factory(os.path.join("metrics" , lowerCamelCase ) ).module_path )
# run doctest
with self.use_local_metrics():
lowercase__ : List[str] = doctest.testmod(lowerCamelCase , verbose=lowerCamelCase , raise_on_error=lowerCamelCase )
self.assertEqual(results.failed , 0 )
self.assertGreater(results.attempted , 1 )
@contextmanager
def __a ( self , lowerCamelCase , lowerCamelCase ) -> str:
"""simple docstring"""
if metric_name in self.INTENSIVE_CALLS_PATCHER:
with self.INTENSIVE_CALLS_PATCHER[metric_name](lowerCamelCase ):
yield
else:
yield
@contextmanager
def __a ( self ) -> int:
"""simple docstring"""
def load_local_metric(lowerCamelCase , *lowerCamelCase , **lowerCamelCase ):
return load_metric(os.path.join("metrics" , lowerCamelCase ) , *lowerCamelCase , **lowerCamelCase )
with patch("datasets.load_metric" ) as mock_load_metric:
lowercase__ : Any = load_local_metric
yield
@classmethod
def __a ( cls , lowerCamelCase ) -> Optional[int]:
"""simple docstring"""
def wrapper(lowerCamelCase ):
lowercase__ : Dict = contextmanager(lowerCamelCase )
lowercase__ : Tuple = patcher
return patcher
return wrapper
@LocalMetricTest.register_intensive_calls_patcher("bleurt" )
def snake_case_ ( SCREAMING_SNAKE_CASE_ ) -> Union[str, Any]:
import tensorflow.compat.va as tf
from bleurt.score import Predictor
tf.flags.DEFINE_string("sv" ,"" ,"" ) # handle pytest cli flags
class UpperCAmelCase( snake_case_ ):
"""simple docstring"""
def __a ( self , lowerCamelCase ) -> Optional[Any]:
"""simple docstring"""
assert len(input_dict["input_ids"] ) == 2
return np.array([1.03, 1.04] )
# mock predict_fn which is supposed to do a forward pass with a bleurt model
with patch("bleurt.score._create_predictor" ) as mock_create_predictor:
lowercase__ : Tuple = MockedPredictor()
yield
@LocalMetricTest.register_intensive_calls_patcher("bertscore" )
def snake_case_ ( SCREAMING_SNAKE_CASE_ ) -> Optional[int]:
import torch
def bert_cos_score_idf(SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,*SCREAMING_SNAKE_CASE_ ,**SCREAMING_SNAKE_CASE_ ):
return torch.tensor([[1.0, 1.0, 1.0]] * len(SCREAMING_SNAKE_CASE_ ) )
# mock get_model which is supposed to do download a bert model
# mock bert_cos_score_idf which is supposed to do a forward pass with a bert model
with patch("bert_score.scorer.get_model" ), patch(
"bert_score.scorer.bert_cos_score_idf" ) as mock_bert_cos_score_idf:
lowercase__ : Tuple = bert_cos_score_idf
yield
@LocalMetricTest.register_intensive_calls_patcher("comet" )
def snake_case_ ( SCREAMING_SNAKE_CASE_ ) -> Any:
def load_from_checkpoint(SCREAMING_SNAKE_CASE_ ):
class UpperCAmelCase:
"""simple docstring"""
def __a ( self , lowerCamelCase , *lowerCamelCase , **lowerCamelCase ) -> Dict:
"""simple docstring"""
assert len(lowerCamelCase ) == 2
lowercase__ : List[Any] = [0.19, 0.92]
return scores, sum(lowerCamelCase ) / len(lowerCamelCase )
return Model()
# mock load_from_checkpoint which is supposed to do download a bert model
# mock load_from_checkpoint which is supposed to do download a bert model
with patch("comet.download_model" ) as mock_download_model:
lowercase__ : int = None
with patch("comet.load_from_checkpoint" ) as mock_load_from_checkpoint:
lowercase__ : Optional[int] = load_from_checkpoint
yield
def snake_case_ ( ) -> Tuple:
lowercase__ : Dict = load_metric(os.path.join("metrics" ,"seqeval" ) )
lowercase__ : str = "ERROR"
lowercase__ : Union[str, Any] = F"""Scheme should be one of [IOB1, IOB2, IOE1, IOE2, IOBES, BILOU], got {wrong_scheme}"""
with pytest.raises(SCREAMING_SNAKE_CASE_ ,match=re.escape(SCREAMING_SNAKE_CASE_ ) ):
metric.compute(predictions=[] ,references=[] ,scheme=SCREAMING_SNAKE_CASE_ )
| 397
| 0
|
import coval # From: git+https://github.com/ns-moosavi/coval.git # noqa: F401
from coval.conll import reader, util
from coval.eval import evaluator
import datasets
lowercase : Optional[int] = datasets.logging.get_logger(__name__)
lowercase : List[Any] = """\
@InProceedings{moosavi2019minimum,
author = { Nafise Sadat Moosavi, Leo Born, Massimo Poesio and Michael Strube},
title = {Using Automatically Extracted Minimum Spans to Disentangle Coreference Evaluation from Boundary Detection},
year = {2019},
booktitle = {Proceedings of the 57th Annual Meeting of
the Association for Computational Linguistics (Volume 1: Long Papers)},
publisher = {Association for Computational Linguistics},
address = {Florence, Italy},
}
@inproceedings{10.3115/1072399.1072405,
author = {Vilain, Marc and Burger, John and Aberdeen, John and Connolly, Dennis and Hirschman, Lynette},
title = {A Model-Theoretic Coreference Scoring Scheme},
year = {1995},
isbn = {1558604022},
publisher = {Association for Computational Linguistics},
address = {USA},
url = {https://doi.org/10.3115/1072399.1072405},
doi = {10.3115/1072399.1072405},
booktitle = {Proceedings of the 6th Conference on Message Understanding},
pages = {45–52},
numpages = {8},
location = {Columbia, Maryland},
series = {MUC6 ’95}
}
@INPROCEEDINGS{Bagga98algorithmsfor,
author = {Amit Bagga and Breck Baldwin},
title = {Algorithms for Scoring Coreference Chains},
booktitle = {In The First International Conference on Language Resources and Evaluation Workshop on Linguistics Coreference},
year = {1998},
pages = {563--566}
}
@INPROCEEDINGS{Luo05oncoreference,
author = {Xiaoqiang Luo},
title = {On coreference resolution performance metrics},
booktitle = {In Proc. of HLT/EMNLP},
year = {2005},
pages = {25--32},
publisher = {URL}
}
@inproceedings{moosavi-strube-2016-coreference,
title = \"Which Coreference Evaluation Metric Do You Trust? A Proposal for a Link-based Entity Aware Metric\",
author = \"Moosavi, Nafise Sadat and
Strube, Michael\",
booktitle = \"Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)\",
month = aug,
year = \"2016\",
address = \"Berlin, Germany\",
publisher = \"Association for Computational Linguistics\",
url = \"https://www.aclweb.org/anthology/P16-1060\",
doi = \"10.18653/v1/P16-1060\",
pages = \"632--642\",
}
"""
lowercase : List[str] = """\
CoVal is a coreference evaluation tool for the CoNLL and ARRAU datasets which
implements the common evaluation metrics including MUC [Vilain et al, 1995],
B-cubed [Bagga and Baldwin, 1998], CEAFe [Luo et al., 2005],
LEA [Moosavi and Strube, 2016] and the averaged CoNLL score
(the average of the F1 values of MUC, B-cubed and CEAFe)
[Denis and Baldridge, 2009a; Pradhan et al., 2011].
This wrapper of CoVal currently only works with the CoNLL line format:
The CoNLL format has one word per line with all the annotation for this word in columns separated by spaces:
Column Type Description
1 Document ID This is a variation on the document filename
2 Part number Some files are divided into multiple parts numbered as 000, 001, 002, ... etc.
3 Word number
4   Word itself This is the token as segmented/tokenized in the Treebank. Initially the *_skel file contains the placeholder [WORD] which gets replaced by the actual token from the Treebank which is part of the OntoNotes release.
5 Part-of-Speech
6   Parse bit This is the bracketed structure broken before the first open parenthesis in the parse, and the word/part-of-speech leaf replaced with a *. The full parse can be created by substituting the asterisk with the \"([pos] [word])\" string (or leaf) and concatenating the items in the rows of that column.
7 Predicate lemma The predicate lemma is mentioned for the rows for which we have semantic role information. All other rows are marked with a \"-\"
8 Predicate Frameset ID This is the PropBank frameset ID of the predicate in Column 7.
9 Word sense This is the word sense of the word in Column 3.
10 Speaker/Author This is the speaker or author name where available. Mostly in Broadcast Conversation and Web Log data.
11  Named Entities These columns identify the spans representing various named entities.
12:N Predicate Arguments There is one column each of predicate argument structure information for the predicate mentioned in Column 7.
N Coreference Coreference chain information encoded in a parenthesis structure.
More information on the format can be found here (section \"*_conll File Format\"): http://www.conll.cemantix.org/2012/data.html
Details on the evaluation on CoNLL can be found here: https://github.com/ns-moosavi/coval/blob/master/conll/README.md
CoVal code was written by @ns-moosavi.
Some parts are borrowed from https://github.com/clarkkev/deep-coref/blob/master/evaluation.py
The test suite is taken from https://github.com/conll/reference-coreference-scorers/
Mention evaluation and the test suite are added by @andreasvc.
Parsing CoNLL files is developed by Leo Born.
"""
lowercase : Union[str, Any] = """
Calculates coreference evaluation metrics.
Args:
predictions: list of sentences. Each sentence is a list of word predictions to score in the CoNLL format.
Each prediction is a word with its annotations as a string made of columns joined with spaces.
Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)
See the details on the format in the description of the metric.
references: list of sentences. Each sentence is a list of word reference to score in the CoNLL format.
Each reference is a word with its annotations as a string made of columns joined with spaces.
Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)
See the details on the format in the description of the metric.
keep_singletons: After extracting all mentions of key or system files,
mentions whose corresponding coreference chain is of size one,
are considered as singletons. The default evaluation mode will include
singletons in evaluations if they are included in the key or the system files.
By setting 'keep_singletons=False', all singletons in the key and system files
will be excluded from the evaluation.
NP_only: Most of the recent coreference resolvers only resolve NP mentions and
leave out the resolution of VPs. By setting the 'NP_only' option, the scorer will only evaluate the resolution of NPs.
min_span: By setting 'min_span', the scorer reports the results based on automatically detected minimum spans.
Minimum spans are determined using the MINA algorithm.
Returns:
'mentions': mentions
'muc': MUC metric [Vilain et al, 1995]
'bcub': B-cubed [Bagga and Baldwin, 1998]
'ceafe': CEAFe [Luo et al., 2005]
'lea': LEA [Moosavi and Strube, 2016]
'conll_score': averaged CoNLL score (the average of the F1 values of MUC, B-cubed and CEAFe)
Examples:
>>> coval = datasets.load_metric('coval')
>>> words = ['bc/cctv/00/cctv_0005 0 0 Thank VBP (TOP(S(VP* thank 01 1 Xu_li * (V*) * -',
... 'bc/cctv/00/cctv_0005 0 1 you PRP (NP*) - - - Xu_li * (ARG1*) (ARG0*) (116)',
... 'bc/cctv/00/cctv_0005 0 2 everyone NN (NP*) - - - Xu_li * (ARGM-DIS*) * (116)',
... 'bc/cctv/00/cctv_0005 0 3 for IN (PP* - - - Xu_li * (ARG2* * -',
... 'bc/cctv/00/cctv_0005 0 4 watching VBG (S(VP*)))) watch 01 1 Xu_li * *) (V*) -',
... 'bc/cctv/00/cctv_0005 0 5 . . *)) - - - Xu_li * * * -']
>>> references = [words]
>>> predictions = [words]
>>> results = coval.compute(predictions=predictions, references=references)
>>> print(results) # doctest:+ELLIPSIS
{'mentions/recall': 1.0,[...] 'conll_score': 100.0}
"""
def A_ ( A__ , A__ , A__=False , A__=False , A__=True , A__=False , A__="dummy_doc" ) -> Tuple:
a__ : int = {doc: key_lines}
a__ : Optional[int] = {doc: sys_lines}
a__ : int = {}
a__ : Union[str, Any] = 0
a__ : Union[str, Any] = 0
a__ : Any = 0
a__ : List[Any] = 0
a__ : Union[str, Any] = 0
a__ : int = 0
a__ : Optional[int] = reader.get_doc_mentions(A__ , key_doc_lines[doc] , A__ )
key_singletons_num += singletons_num
if NP_only or min_span:
a__ : Dict = reader.set_annotated_parse_trees(A__ , key_doc_lines[doc] , A__ , A__ )
a__ : Optional[int] = reader.get_doc_mentions(A__ , sys_doc_lines[doc] , A__ )
sys_singletons_num += singletons_num
if NP_only or min_span:
a__ : Dict = reader.set_annotated_parse_trees(A__ , key_doc_lines[doc] , A__ , A__ )
if remove_nested:
a__ : Union[str, Any] = reader.remove_nested_coref_mentions(A__ , A__ )
key_nested_coref_num += nested_mentions
key_removed_nested_clusters += removed_clusters
a__ : Tuple = reader.remove_nested_coref_mentions(A__ , A__ )
sys_nested_coref_num += nested_mentions
sys_removed_nested_clusters += removed_clusters
a__ : int = reader.get_mention_assignments(A__ , A__ )
a__ : int = reader.get_mention_assignments(A__ , A__ )
a__ : List[Any] = (key_clusters, sys_clusters, key_mention_sys_cluster, sys_mention_key_cluster)
if remove_nested:
logger.info(
'Number of removed nested coreferring mentions in the key '
F'annotation: {key_nested_coref_num}; and system annotation: {sys_nested_coref_num}' )
logger.info(
'Number of resulting singleton clusters in the key '
F'annotation: {key_removed_nested_clusters}; and system annotation: {sys_removed_nested_clusters}' )
if not keep_singletons:
logger.info(
F'{key_singletons_num:d} and {sys_singletons_num:d} singletons are removed from the key and system '
'files, respectively' )
return doc_coref_infos
def A_ ( A__ , A__ , A__ , A__ , A__ , A__ , A__ ) -> List[str]:
a__ : Dict = get_coref_infos(A__ , A__ , A__ , A__ , A__ , A__ )
a__ : int = {}
a__ : int = 0
a__ : List[Any] = 0
for name, metric in metrics:
a__ : Tuple = evaluator.evaluate_documents(A__ , A__ , beta=1 )
if name in ["muc", "bcub", "ceafe"]:
conll += fa
conll_subparts_num += 1
output_scores.update({F'{name}/recall': recall, F'{name}/precision': precision, F'{name}/f1': fa} )
logger.info(
name.ljust(10 ) , F'Recall: {recall * 100:.2f}' , F' Precision: {precision * 100:.2f}' , F' F1: {fa * 100:.2f}' , )
if conll_subparts_num == 3:
a__ : Tuple = (conll / 3) * 100
logger.info(F'CoNLL score: {conll:.2f}' )
output_scores.update({'conll_score': conll} )
return output_scores
def A_ ( A__ ) -> Optional[Any]:
a__ : Optional[Any] = False
for line in key_lines:
if not line.startswith('#' ):
if len(line.split() ) > 6:
a__ : Optional[int] = line.split()[5]
if not parse_col == "-":
a__ : Any = True
break
else:
break
return has_gold_parse
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class A__ ( datasets.Metric ):
"""simple docstring"""
def __lowercase ( self) -> Optional[int]:
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Sequence(datasets.Value('string')),
'references': datasets.Sequence(datasets.Value('string')),
}) , codebase_urls=['https://github.com/ns-moosavi/coval'] , reference_urls=[
'https://github.com/ns-moosavi/coval',
'https://www.aclweb.org/anthology/P16-1060',
'http://www.conll.cemantix.org/2012/data.html',
] , )
def __lowercase ( self , lowercase , lowercase , lowercase=True , lowercase=False , lowercase=False , lowercase=False) -> Optional[int]:
'''simple docstring'''
a__ : List[str] = [
('mentions', evaluator.mentions),
('muc', evaluator.muc),
('bcub', evaluator.b_cubed),
('ceafe', evaluator.ceafe),
('lea', evaluator.lea),
]
if min_span:
a__ : Dict = util.check_gold_parse_annotation(lowercase)
if not has_gold_parse:
raise NotImplementedError('References should have gold parse annotation to use \'min_span\'.')
# util.parse_key_file(key_file)
# key_file = key_file + ".parsed"
a__ : Any = evaluate(
key_lines=lowercase , sys_lines=lowercase , metrics=lowercase , NP_only=lowercase , remove_nested=lowercase , keep_singletons=lowercase , min_span=lowercase , )
return score
| 717
|
from collections import namedtuple
lowercase : List[str] = namedtuple("""from_to""", """from_ to""")
lowercase : Tuple = {
"""cubicmeter""": from_to(1, 1),
"""litre""": from_to(0.001, 1_0_0_0),
"""kilolitre""": from_to(1, 1),
"""gallon""": from_to(0.00_454, 264.172),
"""cubicyard""": from_to(0.76_455, 1.30_795),
"""cubicfoot""": from_to(0.028, 35.3_147),
"""cup""": from_to(0.000_236_588, 4_226.75),
}
# NOTE: the original identifiers were garbled (duplicate argument names made the definition
# invalid); `volume_conversion` and its parameters are reconstructed from the function body and
# the METRIC_CONVERSION table above.
def volume_conversion(value , from_type , to_type ) -> float:
    if from_type not in METRIC_CONVERSION:
        raise ValueError(
            F'Invalid \'from_type\' value: {from_type!r} Supported values are:\n'
            + ', '.join(METRIC_CONVERSION ) )
    if to_type not in METRIC_CONVERSION:
        raise ValueError(
            F'Invalid \'to_type\' value: {to_type!r}. Supported values are:\n'
            + ', '.join(METRIC_CONVERSION ) )
    return value * METRIC_CONVERSION[from_type].from_ * METRIC_CONVERSION[to_type].to
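# Worked examples (hedged: they assume the reconstructed name above):
# volume_conversion(4, "cubicmeter", "litre") -> 4000      (4 * 1 * 1000)
# volume_conversion(1, "litre", "gallon")     -> ~0.264172 (1 * 0.001 * 264.172)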
if __name__ == "__main__":
import doctest
doctest.testmod()
| 392
| 0
|
"""simple docstring"""
import re
from typing import Callable, List, Optional, Union
import tensorflow as tf
try:
from tensorflow.keras.optimizers.legacy import Adam
except ImportError:
from tensorflow.keras.optimizers import Adam
class __UpperCamelCase ( tf.keras.optimizers.schedules.LearningRateSchedule ):
def __init__( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = 1.0 , lowerCAmelCase__ = None , ) -> Optional[Any]:
super().__init__()
a : int = initial_learning_rate
a : str = warmup_steps
a : Optional[Any] = power
a : int = decay_schedule_fn
a : str = name
def __call__( self , lowerCAmelCase__ ) -> Any:
with tf.name_scope(self.name or "WarmUp" ) as name:
# Implements polynomial warmup. i.e., if global_step < warmup_steps, the
# learning rate will be `global_step/num_warmup_steps * init_lr`.
a : Dict = tf.cast(lowerCAmelCase__ , tf.floataa )
a : Tuple = tf.cast(self.warmup_steps , tf.floataa )
a : List[str] = global_step_float / warmup_steps_float
a : Union[str, Any] = self.initial_learning_rate * tf.math.pow(lowerCAmelCase__ , self.power )
return tf.cond(
global_step_float < warmup_steps_float , lambda: warmup_learning_rate , lambda: self.decay_schedule_fn(step - self.warmup_steps ) , name=lowerCAmelCase__ , )
def __a ( self ) -> Dict:
return {
"initial_learning_rate": self.initial_learning_rate,
"decay_schedule_fn": self.decay_schedule_fn,
"warmup_steps": self.warmup_steps,
"power": self.power,
"name": self.name,
}
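# Worked example (hedged, illustrative numbers): during warmup the schedule above returns
# initial_learning_rate * (step / warmup_steps) ** power. With initial_learning_rate=1e-3,
# warmup_steps=100 and power=1.0, step 25 yields 2.5e-4; from step 100 onwards the wrapped
# decay_schedule_fn takes over (evaluated at step - warmup_steps).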
def _SCREAMING_SNAKE_CASE ( _lowercase : float , _lowercase : int , _lowercase : int , _lowercase : float = 0.0 , _lowercase : float = 0.9 , _lowercase : float = 0.999 , _lowercase : float = 1E-8 , _lowercase : Optional[float] = None , _lowercase : Optional[float] = None , _lowercase : float = 0.0 , _lowercase : float = 1.0 , _lowercase : Optional[List[str]] = None , ) ->Optional[int]:
'''simple docstring'''
a : Optional[Any] = tf.keras.optimizers.schedules.PolynomialDecay(
initial_learning_rate=_lowercase , decay_steps=num_train_steps - num_warmup_steps , end_learning_rate=init_lr * min_lr_ratio , power=_lowercase , )
if num_warmup_steps:
a : Dict = WarmUp(
initial_learning_rate=_lowercase , decay_schedule_fn=_lowercase , warmup_steps=_lowercase , )
if weight_decay_rate > 0.0:
a : List[Any] = AdamWeightDecay(
learning_rate=_lowercase , weight_decay_rate=_lowercase , beta_a=_lowercase , beta_a=_lowercase , epsilon=_lowercase , clipnorm=_lowercase , global_clipnorm=_lowercase , exclude_from_weight_decay=["LayerNorm", "layer_norm", "bias"] , include_in_weight_decay=_lowercase , )
else:
a : List[str] = tf.keras.optimizers.Adam(
learning_rate=_lowercase , beta_a=_lowercase , beta_a=_lowercase , epsilon=_lowercase , clipnorm=_lowercase , global_clipnorm=_lowercase , )
# We return the optimizer and the LR scheduler in order to better track the
# evolution of the LR independently of the optimizer.
return optimizer, lr_schedule
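# Hedged usage sketch: the factory above corresponds to `create_optimizer` in
# transformers.optimization_tf (its local name is garbled); the keyword names below follow that
# public API and are assumptions here, not taken from this file.
# optimizer, lr_schedule = create_optimizer(
#     init_lr=5e-5,
#     num_train_steps=10_000,
#     num_warmup_steps=500,
#     weight_decay_rate=0.01,
# )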
class __UpperCamelCase ( a__ ):
def __init__( self , lowerCAmelCase__ = 0.001 , lowerCAmelCase__ = 0.9 , lowerCAmelCase__ = 0.999 , lowerCAmelCase__ = 1E-7 , lowerCAmelCase__ = False , lowerCAmelCase__ = 0.0 , lowerCAmelCase__ = None , lowerCAmelCase__ = None , lowerCAmelCase__ = "AdamWeightDecay" , **lowerCAmelCase__ , ) -> Tuple:
super().__init__(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , **lowerCAmelCase__ )
a : str = weight_decay_rate
a : Union[str, Any] = include_in_weight_decay
a : Tuple = exclude_from_weight_decay
@classmethod
def __a ( cls , lowerCAmelCase__ ) -> int:
a : Tuple = {"WarmUp": WarmUp}
return super(lowerCAmelCase__ , cls ).from_config(lowerCAmelCase__ , custom_objects=lowerCAmelCase__ )
def __a ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> Union[str, Any]:
super(lowerCAmelCase__ , self )._prepare_local(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
a : Any = tf.constant(
self.weight_decay_rate , name="adam_weight_decay_rate" )
def __a ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> Optional[Any]:
a : List[Any] = self._do_use_weight_decay(var.name )
if do_decay:
return var.assign_sub(
learning_rate * var * apply_state[(var.device, var.dtype.base_dtype)]["weight_decay_rate"] , use_locking=self._use_locking , )
return tf.no_op()
def __a ( self , lowerCAmelCase__ , lowerCAmelCase__=None , **lowerCAmelCase__ ) -> int:
a, a : int = list(zip(*lowerCAmelCase__ ) )
return super(lowerCAmelCase__ , self ).apply_gradients(zip(lowerCAmelCase__ , lowerCAmelCase__ ) , name=lowerCAmelCase__ , **lowerCAmelCase__ )
def __a ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> Any:
if apply_state is None:
return self._decayed_lr_t[var_dtype], {}
a : Union[str, Any] = apply_state or {}
a : Dict = apply_state.get((var_device, var_dtype) )
if coefficients is None:
a : Union[str, Any] = self._fallback_apply_state(lowerCAmelCase__ , lowerCAmelCase__ )
a : List[Any] = coefficients
return coefficients["lr_t"], {"apply_state": apply_state}
def __a ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__=None ) -> Tuple:
a, a : Optional[Any] = self._get_lr(var.device , var.dtype.base_dtype , lowerCAmelCase__ )
a : Optional[Any] = self._decay_weights_op(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
with tf.control_dependencies([decay] ):
return super(lowerCAmelCase__ , self )._resource_apply_dense(lowerCAmelCase__ , lowerCAmelCase__ , **lowerCAmelCase__ )
def __a ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__=None ) -> List[Any]:
a, a : str = self._get_lr(var.device , var.dtype.base_dtype , lowerCAmelCase__ )
a : int = self._decay_weights_op(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
with tf.control_dependencies([decay] ):
return super(lowerCAmelCase__ , self )._resource_apply_sparse(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , **lowerCAmelCase__ )
def __a ( self ) -> int:
a : Optional[Any] = super().get_config()
config.update({"weight_decay_rate": self.weight_decay_rate} )
return config
def __a ( self , lowerCAmelCase__ ) -> Optional[Any]:
if self.weight_decay_rate == 0:
return False
if self._include_in_weight_decay:
for r in self._include_in_weight_decay:
if re.search(lowerCAmelCase__ , lowerCAmelCase__ ) is not None:
return True
if self._exclude_from_weight_decay:
for r in self._exclude_from_weight_decay:
if re.search(lowerCAmelCase__ , lowerCAmelCase__ ) is not None:
return False
return True
class __UpperCamelCase ( a__ ):
def __init__( self ) -> List[Any]:
a : List[Any] = []
a : int = None
@property
def __a ( self ) -> int:
if self._accum_steps is None:
a : Dict = tf.Variable(
tf.constant(0 , dtype=tf.intaa ) , trainable=lowerCAmelCase__ , synchronization=tf.VariableSynchronization.ON_READ , aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA , )
return self._accum_steps.value()
@property
def __a ( self ) -> int:
if not self._gradients:
raise ValueError("The accumulator should be called first to initialize the gradients" )
return [gradient.value() if gradient is not None else gradient for gradient in self._gradients]
def __call__( self , lowerCAmelCase__ ) -> Any:
if not self._gradients:
a : Optional[int] = self.step # Create the step variable.
self._gradients.extend(
[
tf.Variable(
tf.zeros_like(lowerCAmelCase__ ) , trainable=lowerCAmelCase__ , synchronization=tf.VariableSynchronization.ON_READ , aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA , )
if gradient is not None
else gradient
for gradient in gradients
] )
if len(lowerCAmelCase__ ) != len(self._gradients ):
raise ValueError(f"""Expected {len(self._gradients )} gradients, but got {len(lowerCAmelCase__ )}""" )
for accum_gradient, gradient in zip(self._gradients , lowerCAmelCase__ ):
if accum_gradient is not None and gradient is not None:
accum_gradient.assign_add(lowerCAmelCase__ )
self._accum_steps.assign_add(1 )
def __a ( self ) -> Union[str, Any]:
if not self._gradients:
return
self._accum_steps.assign(0 )
for gradient in self._gradients:
if gradient is not None:
gradient.assign(tf.zeros_like(lowerCAmelCase__ ) )
| 633
|
"""simple docstring"""
import argparse
import torch
from huggingface_hub import hf_hub_download
from transformers import AutoTokenizer, RobertaPreLayerNormConfig, RobertaPreLayerNormForMaskedLM
from transformers.utils import logging
logging.set_verbosity_info()
a : Optional[int] = logging.get_logger(__name__)
def _SCREAMING_SNAKE_CASE ( _lowercase : str , _lowercase : str ) ->int:
'''simple docstring'''
a : Optional[Any] = RobertaPreLayerNormConfig.from_pretrained(
_lowercase , architectures=["RobertaPreLayerNormForMaskedLM"] )
# convert state_dict
a : Any = torch.load(hf_hub_download(repo_id=_lowercase , filename="pytorch_model.bin" ) )
a : int = {}
for tensor_key, tensor_value in original_state_dict.items():
# The transformer implementation gives the model a unique name, rather than overwiriting 'roberta'
if tensor_key.startswith("roberta." ):
a : List[Any] = "roberta_prelayernorm." + tensor_key[len("roberta." ) :]
# The original implementation contains weights which are not used, remove them from the state_dict
if tensor_key.endswith(".self.LayerNorm.weight" ) or tensor_key.endswith(".self.LayerNorm.bias" ):
continue
a : Dict = tensor_value
a : Optional[Any] = RobertaPreLayerNormForMaskedLM.from_pretrained(
pretrained_model_name_or_path=_lowercase , config=_lowercase , state_dict=_lowercase )
model.save_pretrained(_lowercase )
# convert tokenizer
a : List[Any] = AutoTokenizer.from_pretrained(_lowercase )
tokenizer.save_pretrained(_lowercase )
if __name__ == "__main__":
a : Any = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--checkpoint-repo''',
default=None,
type=str,
required=True,
        help='''Path to the official PyTorch dump, e.g. \'andreasmadsen/efficient_mlm_m0.40\'.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
a : Tuple = parser.parse_args()
convert_roberta_prelayernorm_checkpoint_to_pytorch(args.checkpoint_repo, args.pytorch_dump_folder_path)
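# Hedged CLI sketch: the script filename is an assumption; the flags match the parser above and
# the example repo id comes from the --checkpoint-repo help text.
# python convert_roberta_prelayernorm_checkpoint.py \
#     --checkpoint-repo andreasmadsen/efficient_mlm_m0.40 \
#     --pytorch_dump_folder_path ./roberta-prelayernorm-converted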
| 633
| 1
|
'''simple docstring'''
from argparse import ArgumentParser
from ..pipelines import Pipeline, PipelineDataFormat, get_supported_tasks, pipeline
from ..utils import logging
from . import BaseTransformersCLICommand
__UpperCAmelCase = logging.get_logger(__name__) # pylint: disable=invalid-name
def SCREAMING_SNAKE_CASE_ ( snake_case_ : Tuple ) -> Any:
if not path:
return "pipe"
for ext in PipelineDataFormat.SUPPORTED_FORMATS:
if path.endswith(_lowerCAmelCase ):
return ext
raise Exception(
f"""Unable to determine file format from file extension {path}. """
f"""Please provide the format through --format {PipelineDataFormat.SUPPORTED_FORMATS}""" )
def SCREAMING_SNAKE_CASE_ ( snake_case_ : str ) -> Optional[Any]:
SCREAMING_SNAKE_CASE : Optional[int] = pipeline(
task=args.task , model=args.model if args.model else None , config=args.config , tokenizer=args.tokenizer , device=args.device , )
SCREAMING_SNAKE_CASE : Optional[int] = try_infer_format_from_ext(args.input ) if args.format == "infer" else args.format
SCREAMING_SNAKE_CASE : int = PipelineDataFormat.from_str(
format=_lowerCAmelCase , output_path=args.output , input_path=args.input , column=args.column if args.column else nlp.default_input_names , overwrite=args.overwrite , )
return RunCommand(_lowerCAmelCase , _lowerCAmelCase )
class _a ( __snake_case ):
"""simple docstring"""
def __init__( self ,__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE ):
SCREAMING_SNAKE_CASE : Optional[Any] = nlp
SCREAMING_SNAKE_CASE : Tuple = reader
@staticmethod
def __a ( __SCREAMING_SNAKE_CASE ):
SCREAMING_SNAKE_CASE : List[Any] = parser.add_parser('run' ,help='Run a pipeline through the CLI' )
run_parser.add_argument('--task' ,choices=get_supported_tasks() ,help='Task to run' )
run_parser.add_argument('--input' ,type=A_ ,help='Path to the file to use for inference' )
run_parser.add_argument('--output' ,type=A_ ,help='Path to the file that will be used post to write results.' )
run_parser.add_argument('--model' ,type=A_ ,help='Name or path to the model to instantiate.' )
run_parser.add_argument('--config' ,type=A_ ,help='Name or path to the model\'s config to instantiate.' )
run_parser.add_argument(
'--tokenizer' ,type=A_ ,help='Name of the tokenizer to use. (default: same as the model name)' )
run_parser.add_argument(
'--column' ,type=A_ ,help='Name of the column to use as input. (For multi columns input as QA use column1,columns2)' ,)
run_parser.add_argument(
'--format' ,type=A_ ,default='infer' ,choices=PipelineDataFormat.SUPPORTED_FORMATS ,help='Input format to read from' ,)
run_parser.add_argument(
'--device' ,type=A_ ,default=-1 ,help='Indicate the device to run onto, -1 indicates CPU, >= 0 indicates GPU (default: -1)' ,)
run_parser.add_argument('--overwrite' ,action='store_true' ,help='Allow overwriting the output file.' )
run_parser.set_defaults(func=A_ )
def __a ( self ):
SCREAMING_SNAKE_CASE : List[Any] = self._nlp, []
for entry in self._reader:
SCREAMING_SNAKE_CASE : Tuple = nlp(**A_ ) if self._reader.is_multi_columns else nlp(A_ )
if isinstance(A_ ,A_ ):
outputs.append(A_ )
else:
outputs += output
# Saving data
if self._nlp.binary_output:
SCREAMING_SNAKE_CASE : Optional[Any] = self._reader.save_binary(A_ )
logger.warning(f"""Current pipeline requires output to be in binary format, saving at {binary_path}""" )
else:
self._reader.save(A_ )
| 716
|
'''simple docstring'''
import numpy as np
def sigmoid(vector : np.array ) -> np.array:
    return 1 / (1 + np.exp(-vector ))
def gaussian_error_linear_unit(vector : np.array ) -> np.array:
    # sigmoid approximation of GELU: x * sigmoid(1.702 * x)
    # (function and parameter names reconstructed from the garbled originals)
    return vector * sigmoid(1.702 * vector )
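# Worked examples (hedged: values are approximate and assume the reconstructed names above):
# sigmoid(np.array([-1.0, 1.0, 2.0]))                    -> ~[0.2689, 0.7311, 0.8808]
# gaussian_error_linear_unit(np.array([-1.0, 1.0, 2.0])) -> ~[-0.1542, 0.8458, 1.9357]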
if __name__ == "__main__":
import doctest
doctest.testmod()
| 220
| 0
|
import argparse
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from PIL import Image
from transformers import GLPNConfig, GLPNForDepthEstimation, GLPNImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
lowerCAmelCase_ = logging.get_logger(__name__)
def A_ ( lowercase_ ) -> Union[str, Any]:
_snake_case : str = OrderedDict()
for key, value in state_dict.items():
if key.startswith('''module.encoder''' ):
_snake_case : str = key.replace('''module.encoder''' , '''glpn.encoder''' )
if key.startswith('''module.decoder''' ):
_snake_case : Any = key.replace('''module.decoder''' , '''decoder.stages''' )
if "patch_embed" in key:
# replace for example patch_embed1 by patch_embeddings.0
_snake_case : str = key[key.find('''patch_embed''' ) + len('''patch_embed''' )]
_snake_case : Union[str, Any] = key.replace(f'''patch_embed{idx}''' , f'''patch_embeddings.{int(lowercase_ )-1}''' )
if "norm" in key:
_snake_case : Union[str, Any] = key.replace('''norm''' , '''layer_norm''' )
if "glpn.encoder.layer_norm" in key:
# replace for example layer_norm1 by layer_norm.0
_snake_case : List[str] = key[key.find('''glpn.encoder.layer_norm''' ) + len('''glpn.encoder.layer_norm''' )]
_snake_case : int = key.replace(f'''layer_norm{idx}''' , f'''layer_norm.{int(lowercase_ )-1}''' )
if "layer_norm1" in key:
_snake_case : str = key.replace('''layer_norm1''' , '''layer_norm_1''' )
if "layer_norm2" in key:
_snake_case : Union[str, Any] = key.replace('''layer_norm2''' , '''layer_norm_2''' )
if "block" in key:
# replace for example block1 by block.0
_snake_case : str = key[key.find('''block''' ) + len('''block''' )]
_snake_case : Dict = key.replace(f'''block{idx}''' , f'''block.{int(lowercase_ )-1}''' )
if "attn.q" in key:
_snake_case : Dict = key.replace('''attn.q''' , '''attention.self.query''' )
if "attn.proj" in key:
_snake_case : Optional[int] = key.replace('''attn.proj''' , '''attention.output.dense''' )
if "attn" in key:
_snake_case : Optional[Any] = key.replace('''attn''' , '''attention.self''' )
if "fc1" in key:
_snake_case : str = key.replace('''fc1''' , '''dense1''' )
if "fc2" in key:
_snake_case : Optional[int] = key.replace('''fc2''' , '''dense2''' )
if "linear_pred" in key:
_snake_case : Optional[int] = key.replace('''linear_pred''' , '''classifier''' )
if "linear_fuse" in key:
_snake_case : List[str] = key.replace('''linear_fuse.conv''' , '''linear_fuse''' )
_snake_case : Optional[int] = key.replace('''linear_fuse.bn''' , '''batch_norm''' )
if "linear_c" in key:
# replace for example linear_c4 by linear_c.3
_snake_case : Optional[Any] = key[key.find('''linear_c''' ) + len('''linear_c''' )]
_snake_case : List[Any] = key.replace(f'''linear_c{idx}''' , f'''linear_c.{int(lowercase_ )-1}''' )
if "bot_conv" in key:
_snake_case : Optional[Any] = key.replace('''bot_conv''' , '''0.convolution''' )
if "skip_conv1" in key:
_snake_case : Tuple = key.replace('''skip_conv1''' , '''1.convolution''' )
if "skip_conv2" in key:
_snake_case : Optional[Any] = key.replace('''skip_conv2''' , '''2.convolution''' )
if "fusion1" in key:
_snake_case : Tuple = key.replace('''fusion1''' , '''1.fusion''' )
if "fusion2" in key:
_snake_case : Dict = key.replace('''fusion2''' , '''2.fusion''' )
if "fusion3" in key:
_snake_case : Dict = key.replace('''fusion3''' , '''3.fusion''' )
if "fusion" in key and "conv" in key:
_snake_case : Dict = key.replace('''conv''' , '''convolutional_layer''' )
if key.startswith('''module.last_layer_depth''' ):
_snake_case : Optional[Any] = key.replace('''module.last_layer_depth''' , '''head.head''' )
_snake_case : Union[str, Any] = value
return new_state_dict
def A_ ( lowercase_ , lowercase_ ) -> Dict:
# for each of the encoder blocks:
for i in range(config.num_encoder_blocks ):
for j in range(config.depths[i] ):
# read in weights + bias of keys and values (which is a single matrix in the original implementation)
_snake_case : Optional[Any] = state_dict.pop(f'''glpn.encoder.block.{i}.{j}.attention.self.kv.weight''' )
_snake_case : int = state_dict.pop(f'''glpn.encoder.block.{i}.{j}.attention.self.kv.bias''' )
# next, add keys and values (in that order) to the state dict
_snake_case : Any = kv_weight[
: config.hidden_sizes[i], :
]
_snake_case : List[str] = kv_bias[: config.hidden_sizes[i]]
_snake_case : Any = kv_weight[
config.hidden_sizes[i] :, :
]
_snake_case : Optional[int] = kv_bias[config.hidden_sizes[i] :]
def prepare_img():
    # Fetch the standard COCO sample image used to sanity-check the converted model
    # (the function name is reconstructed from the call site below; stream=True is assumed
    # for the garbled keyword value).
    url = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
    image = Image.open(requests.get(url , stream=True ).raw )
    return image
@torch.no_grad()
def A_ ( lowercase_ , lowercase_ , lowercase_=False , lowercase_=None ) -> Optional[Any]:
_snake_case : int = GLPNConfig(hidden_sizes=[64, 128, 320, 512] , decoder_hidden_size=64 , depths=[3, 8, 27, 3] )
# load image processor (only resize + rescale)
_snake_case : int = GLPNImageProcessor()
# prepare image
_snake_case : int = prepare_img()
_snake_case : int = image_processor(images=lowercase_ , return_tensors='''pt''' ).pixel_values
logger.info('''Converting model...''' )
# load original state dict
_snake_case : List[Any] = torch.load(lowercase_ , map_location=torch.device('''cpu''' ) )
# rename keys
_snake_case : Any = rename_keys(lowercase_ )
# key and value matrices need special treatment
read_in_k_v(lowercase_ , lowercase_ )
# create HuggingFace model and load state dict
_snake_case : Dict = GLPNForDepthEstimation(lowercase_ )
model.load_state_dict(lowercase_ )
model.eval()
# forward pass
_snake_case : Tuple = model(lowercase_ )
_snake_case : Tuple = outputs.predicted_depth
# verify output
if model_name is not None:
if "nyu" in model_name:
_snake_case : Tuple = torch.tensor(
[[4.41_47, 4.08_73, 4.06_73], [3.78_90, 3.28_81, 3.15_25], [3.76_74, 3.54_23, 3.49_13]] )
elif "kitti" in model_name:
_snake_case : List[Any] = torch.tensor(
[[3.42_91, 2.78_65, 2.51_51], [3.28_41, 2.70_21, 2.35_02], [3.11_47, 2.46_25, 2.24_81]] )
else:
raise ValueError(f'''Unknown model name: {model_name}''' )
_snake_case : Union[str, Any] = torch.Size([1, 480, 640] )
assert predicted_depth.shape == expected_shape
assert torch.allclose(predicted_depth[0, :3, :3] , lowercase_ , atol=1E-4 )
print('''Looks ok!''' )
# finally, push to hub if required
if push_to_hub:
logger.info('''Pushing model and image processor to the hub...''' )
model.push_to_hub(
repo_path_or_name=Path(lowercase_ , lowercase_ ) , organization='''nielsr''' , commit_message='''Add model''' , use_temp_dir=lowercase_ , )
image_processor.push_to_hub(
repo_path_or_name=Path(lowercase_ , lowercase_ ) , organization='''nielsr''' , commit_message='''Add image processor''' , use_temp_dir=lowercase_ , )
if __name__ == "__main__":
lowerCAmelCase_ = argparse.ArgumentParser()
parser.add_argument(
"--checkpoint_path",
default=None,
type=str,
help="Path to the original PyTorch checkpoint (.pth file).",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether to upload the model to the HuggingFace hub."
)
parser.add_argument(
"--model_name",
default="glpn-kitti",
type=str,
help="Name of the model in case you're pushing to the hub.",
)
lowerCAmelCase_ = parser.parse_args()
convert_glpn_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
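# Hedged CLI sketch: the script filename and the checkpoint path are assumptions; the flags and
# the "glpn-kitti" model name come from the parser above.
# python convert_glpn_to_pytorch.py \
#     --checkpoint_path ./glpn_kitti.pth \
#     --pytorch_dump_folder_path ./glpn-kitti-converted \
#     --model_name glpn-kitti \
#     --push_to_hub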
| 326
|
from unittest import TestCase
from datasets import Dataset
from minhash_deduplication import deduplicate_dataset, make_duplicate_clusters
def get_dataset() -> Dataset:
_snake_case : Optional[int] = {
'''repo_name''': ['''test_repo1''', '''test_repo2''', '''test_repo3'''],
'''path''': ['''test_1.py''', '''test_2.py''', '''unit_test.py'''],
'''content''': ['''a ''' * 20, '''a ''' * 30, '''b ''' * 7],
}
_snake_case : Tuple = Dataset.from_dict(lowercase_ )
return dataset
class MakeDuplicateClustersTest(TestCase):
    def test_make_duplicate_clusters(self):
        ds = get_dataset()
        duplicate_clusters = make_duplicate_clusters(ds, 0.85)
        self.assertEqual(len(duplicate_clusters[0]), 2)

    def test_deduplicate_dataset(self):
        ds = get_dataset()
        ds_filter, duplicate_clusters = deduplicate_dataset(ds)
        self.assertEqual(len(ds_filter), 2)
        print(duplicate_clusters)
        self.assertEqual(duplicate_clusters[0][0]["copies"], 2)
        self.assertEqual(duplicate_clusters[0][0]["is_extreme"], True)
| 326
| 1
|
"""simple docstring"""
from __future__ import annotations
import os
import tempfile
import unittest
from transformers import ConvBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertModel,
)
class TFConvBertModelTester:
def __init__( self : str , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : Tuple=13 , SCREAMING_SNAKE_CASE__ : int=7 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=True , SCREAMING_SNAKE_CASE__ : int=True , SCREAMING_SNAKE_CASE__ : Optional[int]=True , SCREAMING_SNAKE_CASE__ : Tuple=True , SCREAMING_SNAKE_CASE__ : Optional[int]=99 , SCREAMING_SNAKE_CASE__ : Any=32 , SCREAMING_SNAKE_CASE__ : Any=2 , SCREAMING_SNAKE_CASE__ : str=4 , SCREAMING_SNAKE_CASE__ : Dict=37 , SCREAMING_SNAKE_CASE__ : Tuple="gelu" , SCREAMING_SNAKE_CASE__ : int=0.1 , SCREAMING_SNAKE_CASE__ : Optional[int]=0.1 , SCREAMING_SNAKE_CASE__ : Dict=5_12 , SCREAMING_SNAKE_CASE__ : int=16 , SCREAMING_SNAKE_CASE__ : Tuple=2 , SCREAMING_SNAKE_CASE__ : Any=0.02 , SCREAMING_SNAKE_CASE__ : Optional[Any]=3 , SCREAMING_SNAKE_CASE__ : Optional[int]=4 , SCREAMING_SNAKE_CASE__ : List[Any]=None , ):
lowerCamelCase__ = parent
lowerCamelCase__ = 13
lowerCamelCase__ = 7
lowerCamelCase__ = True
lowerCamelCase__ = True
lowerCamelCase__ = True
lowerCamelCase__ = True
lowerCamelCase__ = 99
lowerCamelCase__ = 3_84
lowerCamelCase__ = 2
lowerCamelCase__ = 4
lowerCamelCase__ = 37
lowerCamelCase__ = 'gelu'
lowerCamelCase__ = 0.1
lowerCamelCase__ = 0.1
lowerCamelCase__ = 5_12
lowerCamelCase__ = 16
lowerCamelCase__ = 2
lowerCamelCase__ = 0.02
lowerCamelCase__ = 3
lowerCamelCase__ = 4
lowerCamelCase__ = 1_28
lowerCamelCase__ = 2
lowerCamelCase__ = 9
lowerCamelCase__ = 1
lowerCamelCase__ = None
def _UpperCamelCase ( self : List[Any] ):
lowerCamelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCamelCase__ = None
if self.use_input_mask:
lowerCamelCase__ = random_attention_mask([self.batch_size, self.seq_length] )
lowerCamelCase__ = None
if self.use_token_type_ids:
lowerCamelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
lowerCamelCase__ = None
lowerCamelCase__ = None
lowerCamelCase__ = None
if self.use_labels:
lowerCamelCase__ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCamelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowerCamelCase__ = ids_tensor([self.batch_size] , self.num_choices )
lowerCamelCase__ = ConvBertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , return_dict=SCREAMING_SNAKE_CASE__ , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def _UpperCamelCase ( self : Tuple , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : List[str] ):
lowerCamelCase__ = TFConvBertModel(config=SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
lowerCamelCase__ = [input_ids, input_mask]
lowerCamelCase__ = model(SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = model(SCREAMING_SNAKE_CASE__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _UpperCamelCase ( self : int , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : Any ):
lowerCamelCase__ = TFConvBertForMaskedLM(config=SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
lowerCamelCase__ = model(SCREAMING_SNAKE_CASE__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _UpperCamelCase ( self : int , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : Optional[Any] ):
lowerCamelCase__ = self.num_labels
lowerCamelCase__ = TFConvBertForSequenceClassification(config=SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
lowerCamelCase__ = model(SCREAMING_SNAKE_CASE__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _UpperCamelCase ( self : str , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : Any ):
lowerCamelCase__ = self.num_choices
lowerCamelCase__ = TFConvBertForMultipleChoice(config=SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = tf.tile(tf.expand_dims(SCREAMING_SNAKE_CASE__ , 1 ) , (1, self.num_choices, 1) )
lowerCamelCase__ = tf.tile(tf.expand_dims(SCREAMING_SNAKE_CASE__ , 1 ) , (1, self.num_choices, 1) )
lowerCamelCase__ = tf.tile(tf.expand_dims(SCREAMING_SNAKE_CASE__ , 1 ) , (1, self.num_choices, 1) )
lowerCamelCase__ = {
'input_ids': multiple_choice_inputs_ids,
'attention_mask': multiple_choice_input_mask,
'token_type_ids': multiple_choice_token_type_ids,
}
lowerCamelCase__ = model(SCREAMING_SNAKE_CASE__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def _UpperCamelCase ( self : List[Any] , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : Any ):
lowerCamelCase__ = self.num_labels
lowerCamelCase__ = TFConvBertForTokenClassification(config=SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
lowerCamelCase__ = model(SCREAMING_SNAKE_CASE__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def _UpperCamelCase ( self : Any , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : List[str] ):
lowerCamelCase__ = TFConvBertForQuestionAnswering(config=SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
lowerCamelCase__ = model(SCREAMING_SNAKE_CASE__ )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class TFConvBertModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
a_ : Optional[int] = (
(
TFConvBertModel,
TFConvBertForMaskedLM,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertForMultipleChoice,
)
if is_tf_available()
else ()
)
a_ : Tuple = (
{
'feature-extraction': TFConvBertModel,
'fill-mask': TFConvBertForMaskedLM,
'question-answering': TFConvBertForQuestionAnswering,
'text-classification': TFConvBertForSequenceClassification,
'token-classification': TFConvBertForTokenClassification,
'zero-shot': TFConvBertForSequenceClassification,
}
if is_tf_available()
else {}
)
a_ : Union[str, Any] = False
a_ : Dict = False
a_ : str = False
def _UpperCamelCase ( self : Optional[int] ):
lowerCamelCase__ = TFConvBertModelTester(self )
lowerCamelCase__ = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE__ , hidden_size=37 )
def _UpperCamelCase ( self : int ):
self.config_tester.run_common_tests()
def _UpperCamelCase ( self : Any ):
lowerCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self : Optional[Any] ):
lowerCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self : str ):
lowerCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self : List[str] ):
lowerCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self : List[str] ):
lowerCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self : str ):
lowerCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*SCREAMING_SNAKE_CASE__ )
@slow
def _UpperCamelCase ( self : List[str] ):
lowerCamelCase__ , lowerCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
lowerCamelCase__ = True
lowerCamelCase__ = True
if hasattr(SCREAMING_SNAKE_CASE__ , 'use_cache' ):
lowerCamelCase__ = True
lowerCamelCase__ = getattr(self.model_tester , 'encoder_seq_length' , self.model_tester.seq_length )
lowerCamelCase__ = getattr(self.model_tester , 'key_length' , SCREAMING_SNAKE_CASE__ )
for model_class in self.all_model_classes:
lowerCamelCase__ = self._prepare_for_class(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = model_class(SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = len(model(SCREAMING_SNAKE_CASE__ ) )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(SCREAMING_SNAKE_CASE__ , saved_model=SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = os.path.join(SCREAMING_SNAKE_CASE__ , 'saved_model' , '1' )
lowerCamelCase__ = tf.keras.models.load_model(SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = model(SCREAMING_SNAKE_CASE__ )
if self.is_encoder_decoder:
lowerCamelCase__ = outputs['encoder_hidden_states']
lowerCamelCase__ = outputs['encoder_attentions']
else:
lowerCamelCase__ = outputs['hidden_states']
lowerCamelCase__ = outputs['attentions']
self.assertEqual(len(SCREAMING_SNAKE_CASE__ ) , SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = getattr(
self.model_tester , 'expected_num_hidden_layers' , self.model_tester.num_hidden_layers + 1 )
self.assertEqual(len(SCREAMING_SNAKE_CASE__ ) , SCREAMING_SNAKE_CASE__ )
self.assertListEqual(
list(output_hidden_states[0].shape[-2:] ) , [self.model_tester.seq_length, self.model_tester.hidden_size] , )
self.assertEqual(len(SCREAMING_SNAKE_CASE__ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(output_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )
@slow
def _UpperCamelCase ( self : List[Any] ):
lowerCamelCase__ = TFConvBertModel.from_pretrained('YituTech/conv-bert-base' )
self.assertIsNotNone(SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self : List[Any] ):
lowerCamelCase__ , lowerCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
lowerCamelCase__ = True
lowerCamelCase__ = getattr(self.model_tester , 'decoder_seq_length' , self.model_tester.seq_length )
lowerCamelCase__ = getattr(self.model_tester , 'encoder_seq_length' , self.model_tester.seq_length )
lowerCamelCase__ = getattr(self.model_tester , 'key_length' , SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = getattr(self.model_tester , 'key_length' , SCREAMING_SNAKE_CASE__ )
def check_decoder_attentions_output(SCREAMING_SNAKE_CASE__ : str ):
lowerCamelCase__ = len(SCREAMING_SNAKE_CASE__ )
self.assertEqual(out_len % 2 , 0 )
lowerCamelCase__ = outputs.decoder_attentions
self.assertEqual(len(SCREAMING_SNAKE_CASE__ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, decoder_seq_length, decoder_key_length] , )
def check_encoder_attentions_output(SCREAMING_SNAKE_CASE__ : Any ):
lowerCamelCase__ = [
t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions)
]
self.assertEqual(len(SCREAMING_SNAKE_CASE__ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )
for model_class in self.all_model_classes:
lowerCamelCase__ = True
lowerCamelCase__ = False
lowerCamelCase__ = model_class(SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = model(self._prepare_for_class(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) )
lowerCamelCase__ = len(SCREAMING_SNAKE_CASE__ )
self.assertEqual(config.output_hidden_states , SCREAMING_SNAKE_CASE__ )
check_encoder_attentions_output(SCREAMING_SNAKE_CASE__ )
if self.is_encoder_decoder:
lowerCamelCase__ = model_class(SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = model(self._prepare_for_class(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) )
self.assertEqual(config.output_hidden_states , SCREAMING_SNAKE_CASE__ )
check_decoder_attentions_output(SCREAMING_SNAKE_CASE__ )
# Check that output attentions can also be changed via the config
del inputs_dict["output_attentions"]
lowerCamelCase__ = True
lowerCamelCase__ = model_class(SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = model(self._prepare_for_class(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) )
self.assertEqual(config.output_hidden_states , SCREAMING_SNAKE_CASE__ )
check_encoder_attentions_output(SCREAMING_SNAKE_CASE__ )
# Check attention is always last and order is fine
lowerCamelCase__ = True
lowerCamelCase__ = True
lowerCamelCase__ = model_class(SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = model(self._prepare_for_class(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) )
self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(SCREAMING_SNAKE_CASE__ ) )
self.assertEqual(model.config.output_hidden_states , SCREAMING_SNAKE_CASE__ )
check_encoder_attentions_output(SCREAMING_SNAKE_CASE__ )
@require_tf
class _a ( unittest.TestCase ):
@slow
def _UpperCamelCase ( self : Any ):
lowerCamelCase__ = TFConvBertModel.from_pretrained('YituTech/conv-bert-base' )
lowerCamelCase__ = tf.constant([[0, 1, 2, 3, 4, 5]] )
lowerCamelCase__ = model(SCREAMING_SNAKE_CASE__ )[0]
lowerCamelCase__ = [1, 6, 7_68]
self.assertEqual(output.shape , SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = tf.constant(
[
[
[-0.03_47_54_93, -0.4_68_60_34, -0.30_63_88_32],
[0.22_63_72_48, -0.26_98_86_46, -0.7_42_34_24],
[0.10_32_48_68, -0.45_01_35_08, -0.58_28_07_84],
]
] )
tf.debugging.assert_near(output[:, :3, :3] , SCREAMING_SNAKE_CASE__ , atol=1e-4 )
| 659
|
"""simple docstring"""
def solution(n: int = 4000000) -> int:
    """Return the sum of the even-valued Fibonacci terms that do not exceed n."""
    fib = [0, 1]
    i = 0
    while fib[i] <= n:
        fib.append(fib[i] + fib[i + 1])
        if fib[i + 2] > n:
            break
        i += 1
    total = 0
    for j in range(len(fib) - 1):
        if fib[j] % 2 == 0:
            total += fib[j]
    return total
if __name__ == "__main__":
print(f"""{solution() = }""")
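# Hand-checked examples: solution(10) == 10 (2 + 8) and solution(100) == 44 (2 + 8 + 34).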
| 659
| 1
|
"""simple docstring"""
import cv2
import numpy as np


class HarrisCorner:
    def __init__(self, k: float, window_size: int):
        """
        k : empirically determined constant in [0.04, 0.06]
        window_size : size of the neighbourhood considered around each pixel
        """
        if k in (0.04, 0.06):
            self.k = k
            self.window_size = window_size
        else:
            raise ValueError("invalid k value")

    def __str__(self):
        return str(self.k)

    def detect(self, img_path: str):
        """
        Returns the image with detected corners marked, together with the list of corner positions.
        """
        img = cv2.imread(img_path, 0)
        h, w = img.shape
        corner_list = []
        color_img = img.copy()
        color_img = cv2.cvtColor(color_img, cv2.COLOR_GRAY2RGB)
        dy, dx = np.gradient(img)
        ixx = dx**2
        iyy = dy**2
        ixy = dx * dy
        k = 0.04
        offset = self.window_size // 2
        for y in range(offset, h - offset):
            for x in range(offset, w - offset):
                wxx = ixx[
                    y - offset : y + offset + 1, x - offset : x + offset + 1
                ].sum()
                wyy = iyy[
                    y - offset : y + offset + 1, x - offset : x + offset + 1
                ].sum()
                wxy = ixy[
                    y - offset : y + offset + 1, x - offset : x + offset + 1
                ].sum()
                det = (wxx * wyy) - (wxy**2)
                trace = wxx + wyy
                r = det - k * (trace**2)
                # Can change the value
                if r > 0.5:
                    corner_list.append([x, y, r])
                    color_img.itemset((y, x, 0), 0)
                    color_img.itemset((y, x, 1), 0)
                    color_img.itemset((y, x, 2), 255)
        return color_img, corner_list
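
# The response computed in `detect` is the standard Harris measure R = det(M) - k * trace(M) ** 2,
# where M = [[wxx, wxy], [wxy, wyy]] is the structure tensor summed over each window; pixels whose
# response exceeds the (tunable) 0.5 threshold are recorded in corner_list and marked in the image.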
if __name__ == "__main__":
    edge_detect = HarrisCorner(0.04, 3)
    color_img, _ = edge_detect.detect('path_to_image')
    cv2.imwrite('detect.png', color_img)
| 584
|
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.test_utils import execute_subprocess_async
def test_command_parser(subparsers=None):
    if subparsers is not None:
        parser = subparsers.add_parser("test")
    else:
        parser = argparse.ArgumentParser("Accelerate test command")

    parser.add_argument(
        "--config_file",
        default=None,
        help=(
            "The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
            "location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
            "such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
            "with 'huggingface'."
        ),
    )

    if subparsers is not None:
        parser.set_defaults(func=test_command)
    return parser


def test_command(args):
    script_name = os.path.sep.join(__file__.split(os.path.sep)[:-2] + ["test_utils", "scripts", "test_script.py"])

    if args.config_file is None:
        test_args = script_name
    else:
        test_args = f"--config_file={args.config_file} {script_name}"

    cmd = ["accelerate-launch"] + test_args.split()
    result = execute_subprocess_async(cmd, env=os.environ.copy())
    if result.returncode == 0:
        print("Test is a success! You are ready for your distributed training!")


def main():
    parser = test_command_parser()
    args = parser.parse_args()
    test_command(args)
if __name__ == "__main__":
main()
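# Typical CLI usage (illustrative): after running `accelerate config`, the command
#
#   accelerate test
#
# (optionally with --config_file /path/to/default_config.yaml, a placeholder path) launches
# test_script.py through `accelerate-launch` using the chosen configuration.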
| 641
| 0
|
'''simple docstring'''
import itertools
import string
from collections.abc import Generator, Iterable
def chunker(seq: Iterable[str], size: int) -> Generator[tuple[str, ...], None, None]:
    it = iter(seq)
    while True:
        chunk = tuple(itertools.islice(it, size))
        if not chunk:
            return
        yield chunk
def prepare_input(dirty: str) -> str:
    """Upper-case the text, keep only letters, and pad repeated letters / odd length with X."""
    dirty = "".join([c.upper() for c in dirty if c in string.ascii_letters])
    clean = ""
    if len(dirty) < 2:
        return dirty
    for i in range(len(dirty) - 1):
        clean += dirty[i]
        if dirty[i] == dirty[i + 1]:
            clean += "X"
    clean += dirty[-1]
    if len(clean) & 1:
        clean += "X"
    return clean
def generate_table(key: str) -> list[str]:
    # I and J are used interchangeably to allow
    # us to use a 5x5 table (25 letters)
    alphabet = "ABCDEFGHIKLMNOPQRSTUVWXYZ"
    # we're using a list instead of a '2d' array because it makes the math
    # for setting up the table and doing the actual encoding/decoding simpler
    table = []
    # copy key chars into the table if they are in `alphabet` ignoring duplicates
    for char in key.upper():
        if char not in table and char in alphabet:
            table.append(char)
    # fill the rest of the table in with the remaining alphabet chars
    for char in alphabet:
        if char not in table:
            table.append(char)
    return table
def encode(plaintext: str, key: str) -> str:
    table = generate_table(key)
    plaintext = prepare_input(plaintext)
    ciphertext = ""
    # https://en.wikipedia.org/wiki/Playfair_cipher#Description
    for char1, char2 in chunker(plaintext, 2):
        row1, col1 = divmod(table.index(char1), 5)
        row2, col2 = divmod(table.index(char2), 5)
        if row1 == row2:
            ciphertext += table[row1 * 5 + (col1 + 1) % 5]
            ciphertext += table[row2 * 5 + (col2 + 1) % 5]
        elif col1 == col2:
            ciphertext += table[((row1 + 1) % 5) * 5 + col1]
            ciphertext += table[((row2 + 1) % 5) * 5 + col2]
        else:  # rectangle
            ciphertext += table[row1 * 5 + col2]
            ciphertext += table[row2 * 5 + col1]
    return ciphertext
def decode(ciphertext: str, key: str) -> str:
    table = generate_table(key)
    plaintext = ""
    # https://en.wikipedia.org/wiki/Playfair_cipher#Description
    for char1, char2 in chunker(ciphertext, 2):
        row1, col1 = divmod(table.index(char1), 5)
        row2, col2 = divmod(table.index(char2), 5)
        if row1 == row2:
            plaintext += table[row1 * 5 + (col1 - 1) % 5]
            plaintext += table[row2 * 5 + (col2 - 1) % 5]
        elif col1 == col2:
            plaintext += table[((row1 - 1) % 5) * 5 + col1]
            plaintext += table[((row2 - 1) % 5) * 5 + col2]
        else:  # rectangle
            plaintext += table[row1 * 5 + col2]
            plaintext += table[row2 * 5 + col1]
    return plaintext
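

if __name__ == "__main__":
    # Minimal round-trip sketch using the helpers above; the key and message are arbitrary examples.
    key = "playfair example"
    encoded = encode("hide the gold in the tree stump", key)
    print(f"Encoded: {encoded}")
    print(f"Decoded: {decode(encoded, key)}")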
| 420
|
'''simple docstring'''
from collections import defaultdict
from math import ceil, sqrt
def solution(t_limit: int = 1_000_000, n_limit: int = 10) -> int:
    count = defaultdict(int)
    for outer_width in range(3, (t_limit // 4) + 2):
        if outer_width * outer_width > t_limit:
            hole_width_lower_bound = max(
                ceil(sqrt(outer_width * outer_width - t_limit)), 1
            )
        else:
            hole_width_lower_bound = 1
        hole_width_lower_bound += (outer_width - hole_width_lower_bound) % 2
        for hole_width in range(hole_width_lower_bound, outer_width - 1, 2):
            count[outer_width * outer_width - hole_width * hole_width] += 1
    return sum(1 for n in count.values() if 1 <= n <= n_limit)
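
# The loop above enumerates square laminae by outer side length and (same-parity) hole side length,
# tallying how many laminae use exactly t = outer_width**2 - hole_width**2 tiles; the result is the
# number of tile counts t <= t_limit that can be formed in 1 to n_limit distinct ways.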
if __name__ == "__main__":
print(f"""{solution() = }""")
| 420
| 1
|
'''simple docstring'''
import inspect
import os
import unittest
from dataclasses import dataclass
import torch
from accelerate import Accelerator, DistributedDataParallelKwargs, GradScalerKwargs
from accelerate.state import AcceleratorState
from accelerate.test_utils import execute_subprocess_async, require_cuda, require_multi_gpu
from accelerate.utils import KwargsHandler
@dataclass
class MockClass(KwargsHandler):
    a: int = 0
    b: bool = False
    c: float = 3.0
class A ( unittest.TestCase ):
"""simple docstring"""
def _UpperCAmelCase ( self ):
# If no defaults are changed, `to_kwargs` returns an empty dict.
self.assertDictEqual(MockClass().to_kwargs() , {} )
self.assertDictEqual(MockClass(a=2 ).to_kwargs() , {"""a""": 2} )
        self.assertDictEqual(MockClass(a=2, b=True).to_kwargs(), {"a": 2, "b": True})
self.assertDictEqual(MockClass(a=2 , c=2.25 ).to_kwargs() , {"""a""": 2, """c""": 2.25} )
@require_cuda
def _UpperCAmelCase ( self ):
# If no defaults are changed, `to_kwargs` returns an empty dict.
        scaler_handler = GradScalerKwargs(init_scale=1024, growth_factor=2)
        AcceleratorState._reset_state()
        accelerator = Accelerator(mixed_precision="fp16", kwargs_handlers=[scaler_handler])
        print(accelerator.use_fp16)
        scaler = accelerator.scaler
# Check the kwargs have been applied
self.assertEqual(scaler._init_scale , 10_24.0 )
self.assertEqual(scaler._growth_factor , 2.0 )
# Check the other values are at the default
self.assertEqual(scaler._backoff_factor , 0.5 )
self.assertEqual(scaler._growth_interval , 20_00 )
        self.assertEqual(scaler._enabled, True)
@require_multi_gpu
def _UpperCAmelCase ( self ):
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", inspect.getfile(self.__class__)]
        execute_subprocess_async(cmd, env=os.environ.copy())
if __name__ == "__main__":
    ddp_scaler = DistributedDataParallelKwargs(bucket_cap_mb=15, find_unused_parameters=True)
    accelerator = Accelerator(kwargs_handlers=[ddp_scaler])
    model = torch.nn.Linear(100, 200)
    model = accelerator.prepare(model)

    # Check the values changed in kwargs
    error_msg = ""
    observed_bucket_cap_map = model.bucket_bytes_cap // (1024 * 1024)
if observed_bucket_cap_map != 15:
error_msg += f"Kwargs badly passed, should have `15` but found {observed_bucket_cap_map}.\n"
if model.find_unused_parameters is not True:
error_msg += f"Kwargs badly passed, should have `True` but found {model.find_unused_parameters}.\n"
# Check the values of the defaults
if model.dim != 0:
error_msg += f"Default value not respected, should have `0` but found {model.dim}.\n"
if model.broadcast_buffers is not True:
error_msg += f"Default value not respected, should have `True` but found {model.broadcast_buffers}.\n"
if model.gradient_as_bucket_view is not False:
error_msg += f"Default value not respected, should have `False` but found {model.gradient_as_bucket_view}.\n"
# Raise error at the end to make sure we don't stop at the first failure.
if len(error_msg) > 0:
raise ValueError(error_msg)
| 208
|
'''simple docstring'''
import unittest
import numpy as np
import torch
from diffusers import PNDMPipeline, PNDMScheduler, UNet2DModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class A ( unittest.TestCase ):
"""simple docstring"""
@property
    def dummy_uncond_unet(self):
        torch.manual_seed(0)
        model = UNet2DModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=3,
            out_channels=3,
            down_block_types=("DownBlock2D", "AttnDownBlock2D"),
            up_block_types=("AttnUpBlock2D", "UpBlock2D"),
        )
        return model
    def test_inference(self):
        unet = self.dummy_uncond_unet
        scheduler = PNDMScheduler()
        pndm = PNDMPipeline(unet=unet, scheduler=scheduler)
        pndm.to(torch_device)
        pndm.set_progress_bar_config(disable=None)
        generator = torch.manual_seed(0)
        image = pndm(generator=generator, num_inference_steps=20, output_type="numpy").images
        generator = torch.manual_seed(0)
        image_from_tuple = pndm(generator=generator, num_inference_steps=20, output_type="numpy", return_dict=False)[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0, 0.0])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
@slow
@require_torch
class A ( unittest.TestCase ):
"""simple docstring"""
    def test_inference_cifar10(self):
        model_id = "google/ddpm-cifar10-32"
        unet = UNet2DModel.from_pretrained(model_id)
        scheduler = PNDMScheduler()
        pndm = PNDMPipeline(unet=unet, scheduler=scheduler)
        pndm.to(torch_device)
        pndm.set_progress_bar_config(disable=None)
        generator = torch.manual_seed(0)
        image = pndm(generator=generator, output_type="numpy").images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.1564, 0.14645, 0.1406, 0.14715, 0.12425, 0.14045, 0.13115, 0.12175, 0.125])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
| 208
| 1
|
"""simple docstring"""
import base64


def base32_encode(string: str) -> bytes:
    """Encode a UTF-8 string to base32 bytes."""
    return base64.b32encode(string.encode('utf-8'))


def base32_decode(encoded_bytes: bytes) -> str:
    """Decode base32 bytes back to a UTF-8 string."""
    return base64.b32decode(encoded_bytes).decode('utf-8')


if __name__ == "__main__":
    test = 'Hello World!'
    encoded = base32_encode(test)
    print(encoded)
    decoded = base32_decode(encoded)
print(decoded)
| 134
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available
from ...utils import OptionalDependencyNotAvailable
_import_structure = {'configuration_gpt_neox': ['GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP', 'GPTNeoXConfig']}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_gpt_neox_fast'] = ['GPTNeoXTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_gpt_neox'] = [
        'GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST',
        'GPTNeoXForCausalLM',
        'GPTNeoXForQuestionAnswering',
        'GPTNeoXForSequenceClassification',
        'GPTNeoXForTokenClassification',
        'GPTNeoXLayer',
        'GPTNeoXModel',
        'GPTNeoXPreTrainedModel',
    ]
if TYPE_CHECKING:
from .configuration_gpt_neox import GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXConfig
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_gpt_neox_fast import GPTNeoXTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neox import (
GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoXForCausalLM,
GPTNeoXForQuestionAnswering,
GPTNeoXForSequenceClassification,
GPTNeoXForTokenClassification,
GPTNeoXLayer,
GPTNeoXModel,
GPTNeoXPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 134
| 1
|
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer
from ...utils import logging
_lowercase = logging.get_logger(__name__)
_lowercase = '▁'
_lowercase = {'vocab_file': 'sentencepiece.bpe.model'}
_lowercase = {
'vocab_file': {
'facebook/nllb-200-distilled-600M': (
'https://huggingface.co/facebook/nllb-200-distilled-600M/blob/main/sentencepiece.bpe.model'
),
}
}
_lowercase = {
'facebook/nllb-200-distilled-600M': 1024,
}
# fmt: off
_lowercase = ['ace_Arab', 'ace_Latn', 'acm_Arab', 'acq_Arab', 'aeb_Arab', 'afr_Latn', 'ajp_Arab', 'aka_Latn', 'amh_Ethi', 'apc_Arab', 'arb_Arab', 'ars_Arab', 'ary_Arab', 'arz_Arab', 'asm_Beng', 'ast_Latn', 'awa_Deva', 'ayr_Latn', 'azb_Arab', 'azj_Latn', 'bak_Cyrl', 'bam_Latn', 'ban_Latn', 'bel_Cyrl', 'bem_Latn', 'ben_Beng', 'bho_Deva', 'bjn_Arab', 'bjn_Latn', 'bod_Tibt', 'bos_Latn', 'bug_Latn', 'bul_Cyrl', 'cat_Latn', 'ceb_Latn', 'ces_Latn', 'cjk_Latn', 'ckb_Arab', 'crh_Latn', 'cym_Latn', 'dan_Latn', 'deu_Latn', 'dik_Latn', 'dyu_Latn', 'dzo_Tibt', 'ell_Grek', 'eng_Latn', 'epo_Latn', 'est_Latn', 'eus_Latn', 'ewe_Latn', 'fao_Latn', 'pes_Arab', 'fij_Latn', 'fin_Latn', 'fon_Latn', 'fra_Latn', 'fur_Latn', 'fuv_Latn', 'gla_Latn', 'gle_Latn', 'glg_Latn', 'grn_Latn', 'guj_Gujr', 'hat_Latn', 'hau_Latn', 'heb_Hebr', 'hin_Deva', 'hne_Deva', 'hrv_Latn', 'hun_Latn', 'hye_Armn', 'ibo_Latn', 'ilo_Latn', 'ind_Latn', 'isl_Latn', 'ita_Latn', 'jav_Latn', 'jpn_Jpan', 'kab_Latn', 'kac_Latn', 'kam_Latn', 'kan_Knda', 'kas_Arab', 'kas_Deva', 'kat_Geor', 'knc_Arab', 'knc_Latn', 'kaz_Cyrl', 'kbp_Latn', 'kea_Latn', 'khm_Khmr', 'kik_Latn', 'kin_Latn', 'kir_Cyrl', 'kmb_Latn', 'kon_Latn', 'kor_Hang', 'kmr_Latn', 'lao_Laoo', 'lvs_Latn', 'lij_Latn', 'lim_Latn', 'lin_Latn', 'lit_Latn', 'lmo_Latn', 'ltg_Latn', 'ltz_Latn', 'lua_Latn', 'lug_Latn', 'luo_Latn', 'lus_Latn', 'mag_Deva', 'mai_Deva', 'mal_Mlym', 'mar_Deva', 'min_Latn', 'mkd_Cyrl', 'plt_Latn', 'mlt_Latn', 'mni_Beng', 'khk_Cyrl', 'mos_Latn', 'mri_Latn', 'zsm_Latn', 'mya_Mymr', 'nld_Latn', 'nno_Latn', 'nob_Latn', 'npi_Deva', 'nso_Latn', 'nus_Latn', 'nya_Latn', 'oci_Latn', 'gaz_Latn', 'ory_Orya', 'pag_Latn', 'pan_Guru', 'pap_Latn', 'pol_Latn', 'por_Latn', 'prs_Arab', 'pbt_Arab', 'quy_Latn', 'ron_Latn', 'run_Latn', 'rus_Cyrl', 'sag_Latn', 'san_Deva', 'sat_Beng', 'scn_Latn', 'shn_Mymr', 'sin_Sinh', 'slk_Latn', 'slv_Latn', 'smo_Latn', 'sna_Latn', 'snd_Arab', 'som_Latn', 'sot_Latn', 'spa_Latn', 'als_Latn', 'srd_Latn', 'srp_Cyrl', 'ssw_Latn', 'sun_Latn', 'swe_Latn', 'swh_Latn', 'szl_Latn', 'tam_Taml', 'tat_Cyrl', 'tel_Telu', 'tgk_Cyrl', 'tgl_Latn', 'tha_Thai', 'tir_Ethi', 'taq_Latn', 'taq_Tfng', 'tpi_Latn', 'tsn_Latn', 'tso_Latn', 'tuk_Latn', 'tum_Latn', 'tur_Latn', 'twi_Latn', 'tzm_Tfng', 'uig_Arab', 'ukr_Cyrl', 'umb_Latn', 'urd_Arab', 'uzn_Latn', 'vec_Latn', 'vie_Latn', 'war_Latn', 'wol_Latn', 'xho_Latn', 'ydd_Hebr', 'yor_Latn', 'yue_Hant', 'zho_Hans', 'zho_Hant', 'zul_Latn']
class UpperCAmelCase_ ( a_ ):
'''simple docstring'''
_lowercase : Any = VOCAB_FILES_NAMES
_lowercase : List[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_lowercase : List[str] = PRETRAINED_VOCAB_FILES_MAP
_lowercase : int = ['''input_ids''', '''attention_mask''']
_lowercase : List[str] = []
_lowercase : Tuple = []
def __init__( self , _lowercase , _lowercase="<s>" , _lowercase="</s>" , _lowercase="</s>" , _lowercase="<s>" , _lowercase="<unk>" , _lowercase="<pad>" , _lowercase="<mask>" , _lowercase=None , _lowercase=None , _lowercase=None , _lowercase = None , _lowercase=None , _lowercase=False , **_lowercase , ):
"""simple docstring"""
_lowerCAmelCase = AddedToken(_lowercase , lstrip=_lowercase , rstrip=_lowercase ) if isinstance(_lowercase , _lowercase ) else mask_token
_lowerCAmelCase = {} if sp_model_kwargs is None else sp_model_kwargs
_lowerCAmelCase = legacy_behaviour
super().__init__(
bos_token=_lowercase , eos_token=_lowercase , unk_token=_lowercase , sep_token=_lowercase , cls_token=_lowercase , pad_token=_lowercase , mask_token=_lowercase , tokenizer_file=_lowercase , src_lang=_lowercase , tgt_lang=_lowercase , additional_special_tokens=_lowercase , sp_model_kwargs=self.sp_model_kwargs , legacy_behaviour=_lowercase , **_lowercase , )
_lowerCAmelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(_lowercase ) )
_lowerCAmelCase = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | ---- | ---- | ---- | ---- | ---- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | 'an' | '▁n' | '▁m' | '▁t' | '▁k' | '▁a'
# spm | '<unk>' | '<s>' | '</s>' | 'an' | '▁n' | '▁m' | '▁t' | '▁k' | '▁a' | '▁s'
# Mimic fairseq token-to-id alignment for the first 4 token
_lowerCAmelCase = {'''<s>''': 0, '''<pad>''': 1, '''</s>''': 2, '''<unk>''': 3}
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
_lowerCAmelCase = 1
_lowerCAmelCase = len(self.sp_model )
_lowerCAmelCase = {
code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(_lowercase )
}
_lowerCAmelCase = {v: k for k, v in self.lang_code_to_id.items()}
_lowerCAmelCase = len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset
self.fairseq_tokens_to_ids.update(self.lang_code_to_id )
_lowerCAmelCase = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
_lowerCAmelCase = list(self.lang_code_to_id.keys() )
if additional_special_tokens is not None:
# Only add those special tokens if they are not already there.
self._additional_special_tokens.extend(
[t for t in additional_special_tokens if t not in self._additional_special_tokens] )
_lowerCAmelCase = src_lang if src_lang is not None else '''eng_Latn'''
_lowerCAmelCase = self.lang_code_to_id[self._src_lang]
_lowerCAmelCase = tgt_lang
self.set_src_lang_special_tokens(self._src_lang )
def __getstate__( self ):
"""simple docstring"""
_lowerCAmelCase = self.__dict__.copy()
_lowerCAmelCase = None
_lowerCAmelCase = self.sp_model.serialized_model_proto()
return state
def __setstate__( self , _lowercase ):
"""simple docstring"""
_lowerCAmelCase = d
# for backward compatibility
if not hasattr(self , """sp_model_kwargs""" ):
_lowerCAmelCase = {}
_lowerCAmelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
@property
def _lowercase ( self ):
"""simple docstring"""
return len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset + 1 # Plus 1 for the mask token
@property
def _lowercase ( self ):
"""simple docstring"""
return self._src_lang
@src_lang.setter
def _lowercase ( self , _lowercase ):
"""simple docstring"""
_lowerCAmelCase = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def _lowercase ( self , _lowercase , _lowercase = None , _lowercase = False ):
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_lowercase , token_ids_a=_lowercase , already_has_special_tokens=_lowercase )
_lowerCAmelCase = [1] * len(self.prefix_tokens )
_lowerCAmelCase = [1] * len(self.suffix_tokens )
if token_ids_a is None:
return prefix_ones + ([0] * len(_lowercase )) + suffix_ones
return prefix_ones + ([0] * len(_lowercase )) + ([0] * len(_lowercase )) + suffix_ones
def _lowercase ( self , _lowercase , _lowercase = None ):
"""simple docstring"""
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def _lowercase ( self , _lowercase , _lowercase = None ):
"""simple docstring"""
_lowerCAmelCase = [self.sep_token_id]
_lowerCAmelCase = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def _lowercase ( self , _lowercase , _lowercase , _lowercase , _lowercase , **_lowercase ):
"""simple docstring"""
if src_lang is None or tgt_lang is None:
raise ValueError("""Translation requires a `src_lang` and a `tgt_lang` for this model""" )
_lowerCAmelCase = src_lang
_lowerCAmelCase = self(_lowercase , add_special_tokens=_lowercase , return_tensors=_lowercase , **_lowercase )
_lowerCAmelCase = self.convert_tokens_to_ids(_lowercase )
_lowerCAmelCase = tgt_lang_id
return inputs
def _lowercase ( self ):
"""simple docstring"""
_lowerCAmelCase = {self.convert_ids_to_tokens(_lowercase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def _lowercase ( self , _lowercase ):
"""simple docstring"""
return self.sp_model.encode(_lowercase , out_type=_lowercase )
def _lowercase ( self , _lowercase ):
"""simple docstring"""
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
_lowerCAmelCase = self.sp_model.PieceToId(_lowercase )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def _lowercase ( self , _lowercase ):
"""simple docstring"""
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def _lowercase ( self , _lowercase ):
"""simple docstring"""
_lowerCAmelCase = ''''''.join(_lowercase ).replace(_lowercase , """ """ ).strip()
return out_string
def _lowercase ( self , _lowercase , _lowercase = None ):
"""simple docstring"""
if not os.path.isdir(_lowercase ):
logger.error(F'Vocabulary path ({save_directory}) should be a directory' )
return
_lowerCAmelCase = os.path.join(
_lowercase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_lowercase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , _lowercase )
elif not os.path.isfile(self.vocab_file ):
with open(_lowercase , """wb""" ) as fi:
_lowerCAmelCase = self.sp_model.serialized_model_proto()
fi.write(_lowercase )
return (out_vocab_file,)
def _lowercase ( self , _lowercase , _lowercase = "eng_Latn" , _lowercase = None , _lowercase = "fra_Latn" , **_lowercase , ):
"""simple docstring"""
_lowerCAmelCase = src_lang
_lowerCAmelCase = tgt_lang
return super().prepare_seqaseq_batch(_lowercase , _lowercase , **_lowercase )
def _lowercase ( self ):
"""simple docstring"""
return self.set_src_lang_special_tokens(self.src_lang )
def _lowercase ( self ):
"""simple docstring"""
return self.set_tgt_lang_special_tokens(self.tgt_lang )
def _lowercase ( self , _lowercase ):
"""simple docstring"""
_lowerCAmelCase = self.lang_code_to_id[src_lang]
if self.legacy_behaviour:
_lowerCAmelCase = []
_lowerCAmelCase = [self.eos_token_id, self.cur_lang_code]
else:
_lowerCAmelCase = [self.cur_lang_code]
_lowerCAmelCase = [self.eos_token_id]
def _lowercase ( self , _lowercase ):
"""simple docstring"""
_lowerCAmelCase = self.lang_code_to_id[lang]
if self.legacy_behaviour:
_lowerCAmelCase = []
_lowerCAmelCase = [self.eos_token_id, self.cur_lang_code]
else:
_lowerCAmelCase = [self.cur_lang_code]
_lowerCAmelCase = [self.eos_token_id]
| 5
|
"""simple docstring"""
import os
import shutil
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
from datasets import Dataset
from transformers.models.realm.configuration_realm import RealmConfig
from transformers.models.realm.retrieval_realm import _REALM_BLOCK_RECORDS_FILENAME, RealmRetriever
from transformers.models.realm.tokenization_realm import VOCAB_FILES_NAMES, RealmTokenizer
class _UpperCAmelCase ( a_ ):
"""simple docstring"""
def a__ ( self ) -> str:
_lowerCamelCase : Any = tempfile.mkdtemp()
_lowerCamelCase : List[Any] = 5
# Realm tok
_lowerCamelCase : List[str] = [
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''[PAD]''',
'''[MASK]''',
'''test''',
'''question''',
'''this''',
'''is''',
'''the''',
'''first''',
'''second''',
'''third''',
'''fourth''',
'''fifth''',
'''record''',
'''want''',
'''##want''',
'''##ed''',
'''wa''',
'''un''',
'''runn''',
'''##ing''',
''',''',
'''low''',
'''lowest''',
]
_lowerCamelCase : int = os.path.join(self.tmpdirname , '''realm_tokenizer''' )
os.makedirs(_lowercase , exist_ok=_lowercase )
_lowerCamelCase : Any = os.path.join(_lowercase , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
_lowerCamelCase : Optional[int] = os.path.join(self.tmpdirname , '''realm_block_records''' )
os.makedirs(_lowercase , exist_ok=_lowercase )
def a__ ( self ) -> RealmTokenizer:
return RealmTokenizer.from_pretrained(os.path.join(self.tmpdirname , '''realm_tokenizer''' ) )
def a__ ( self ) -> int:
shutil.rmtree(self.tmpdirname )
def a__ ( self ) -> str:
_lowerCamelCase : Optional[Any] = RealmConfig(num_block_records=self.num_block_records )
return config
def a__ ( self ) -> Optional[int]:
_lowerCamelCase : Tuple = Dataset.from_dict(
{
'''id''': ['''0''', '''1'''],
'''question''': ['''foo''', '''bar'''],
'''answers''': [['''Foo''', '''Bar'''], ['''Bar''']],
} )
return dataset
def a__ ( self ) -> int:
_lowerCamelCase : Dict = np.array(
[
b'''This is the first record''',
b'''This is the second record''',
b'''This is the third record''',
b'''This is the fourth record''',
b'''This is the fifth record''',
b'''This is a longer longer longer record''',
] , dtype=_lowercase , )
return block_records
def a__ ( self ) -> Dict:
_lowerCamelCase : Union[str, Any] = RealmRetriever(
block_records=self.get_dummy_block_records() , tokenizer=self.get_tokenizer() , )
return retriever
def a__ ( self ) -> int:
_lowerCamelCase : int = self.get_config()
_lowerCamelCase : Optional[Any] = self.get_dummy_retriever()
_lowerCamelCase : Union[str, Any] = retriever.tokenizer
_lowerCamelCase : Any = np.array([0, 3] , dtype='''long''' )
_lowerCamelCase : Any = tokenizer(['''Test question'''] ).input_ids
_lowerCamelCase : str = tokenizer(
['''the fourth'''] , add_special_tokens=_lowercase , return_token_type_ids=_lowercase , return_attention_mask=_lowercase , ).input_ids
_lowerCamelCase : List[str] = config.reader_seq_len
_lowerCamelCase, _lowerCamelCase, _lowerCamelCase, _lowerCamelCase : Union[str, Any] = retriever(
_lowercase , _lowercase , answer_ids=_lowercase , max_length=_lowercase , return_tensors='''np''' )
self.assertEqual(len(_lowercase ) , 2 )
self.assertEqual(len(_lowercase ) , 2 )
self.assertEqual(len(_lowercase ) , 2 )
self.assertEqual(concat_inputs.input_ids.shape , (2, 10) )
self.assertEqual(concat_inputs.attention_mask.shape , (2, 10) )
self.assertEqual(concat_inputs.token_type_ids.shape , (2, 10) )
self.assertEqual(concat_inputs.special_tokens_mask.shape , (2, 10) )
self.assertEqual(
tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[0] ) , ['''[CLS]''', '''test''', '''question''', '''[SEP]''', '''this''', '''is''', '''the''', '''first''', '''record''', '''[SEP]'''] , )
self.assertEqual(
tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[1] ) , ['''[CLS]''', '''test''', '''question''', '''[SEP]''', '''this''', '''is''', '''the''', '''fourth''', '''record''', '''[SEP]'''] , )
def a__ ( self ) -> int:
_lowerCamelCase : str = self.get_config()
_lowerCamelCase : Union[str, Any] = self.get_dummy_retriever()
_lowerCamelCase : str = retriever.tokenizer
_lowerCamelCase : List[str] = np.array([0, 3, 5] , dtype='''long''' )
_lowerCamelCase : List[Any] = tokenizer(['''Test question'''] ).input_ids
_lowerCamelCase : List[str] = tokenizer(
['''the fourth''', '''longer longer'''] , add_special_tokens=_lowercase , return_token_type_ids=_lowercase , return_attention_mask=_lowercase , ).input_ids
_lowerCamelCase : Optional[Any] = config.reader_seq_len
_lowerCamelCase, _lowerCamelCase, _lowerCamelCase, _lowerCamelCase : List[Any] = retriever(
_lowercase , _lowercase , answer_ids=_lowercase , max_length=_lowercase , return_tensors='''np''' )
self.assertEqual([False, True, True] , _lowercase )
self.assertEqual([[-1, -1, -1], [6, -1, -1], [6, 7, 8]] , _lowercase )
self.assertEqual([[-1, -1, -1], [7, -1, -1], [7, 8, 9]] , _lowercase )
def a__ ( self ) -> int:
_lowerCamelCase : Optional[int] = self.get_dummy_retriever()
retriever.save_pretrained(os.path.join(self.tmpdirname , '''realm_block_records''' ) )
# Test local path
_lowerCamelCase : Dict = retriever.from_pretrained(os.path.join(self.tmpdirname , '''realm_block_records''' ) )
self.assertEqual(retriever.block_records[0] , b'''This is the first record''' )
# Test mocked remote path
with patch('''transformers.models.realm.retrieval_realm.hf_hub_download''' ) as mock_hf_hub_download:
_lowerCamelCase : Optional[Any] = os.path.join(
os.path.join(self.tmpdirname , '''realm_block_records''' ) , _REALM_BLOCK_RECORDS_FILENAME )
_lowerCamelCase : List[Any] = RealmRetriever.from_pretrained('''google/realm-cc-news-pretrained-openqa''' )
self.assertEqual(retriever.block_records[0] , b'''This is the first record''' )
| 434
| 0
|
'''simple docstring'''
from __future__ import annotations
sieve = [True] * 1_000_001
i = 2
while i * i <= 1_000_000:
    if sieve[i]:
        for j in range(i * i, 1_000_001, i):
            sieve[j] = False
    i += 1


def is_prime(n: int) -> bool:
    """Return True if n is prime (n must be below one million, per the sieve above)."""
    return sieve[n]


def contains_an_even_digit(n: int) -> bool:
    """Return True if n contains an even digit."""
    return any(digit in '02468' for digit in str(n))


def find_circular_primes(limit: int = 1_000_000) -> list[int]:
    """Return all circular primes below limit."""
    result = [2]  # result already includes the number 2.
    for num in range(3, limit + 1, 2):
        if is_prime(num) and not contains_an_even_digit(num):
            str_num = str(num)
            list_nums = [int(str_num[j:] + str_num[:j]) for j in range(len(str_num))]
            if all(is_prime(i) for i in list_nums):
                result.append(num)
    return result


def solution() -> int:
    """Return the number of circular primes below one million."""
    return len(find_circular_primes())
if __name__ == "__main__":
print(f"""{len(find_circular_primes()) = }""")
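# Example: 197 is a circular prime because every rotation of its digits (197, 971, 719) is prime.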
| 721
|
import inspect
import unittest
from huggingface_hub import hf_hub_download
from transformers import ConvNextConfig, UperNetConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import UperNetForSemanticSegmentation
from transformers.models.upernet.modeling_upernet import UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class UperNetModelTester:
"""simple docstring"""
def __init__( self : List[str] , _lowercase : Union[str, Any] , _lowercase : int=13 , _lowercase : Optional[int]=32 , _lowercase : Optional[Any]=3 , _lowercase : Union[str, Any]=4 , _lowercase : Any=[10, 20, 30, 40] , _lowercase : str=[2, 2, 3, 2] , _lowercase : Any=True , _lowercase : Union[str, Any]=True , _lowercase : int=37 , _lowercase : Union[str, Any]="gelu" , _lowercase : Union[str, Any]=10 , _lowercase : Tuple=0.02 , _lowercase : int=["stage2", "stage3", "stage4"] , _lowercase : Optional[Any]=3 , _lowercase : Optional[int]=None , ):
"""simple docstring"""
_UpperCamelCase: Optional[int] = parent
_UpperCamelCase: str = batch_size
_UpperCamelCase: str = image_size
_UpperCamelCase: Any = num_channels
_UpperCamelCase: Union[str, Any] = num_stages
_UpperCamelCase: Any = hidden_sizes
_UpperCamelCase: int = depths
_UpperCamelCase: Dict = is_training
_UpperCamelCase: Optional[int] = use_labels
_UpperCamelCase: Optional[int] = intermediate_size
_UpperCamelCase: int = hidden_act
_UpperCamelCase: Tuple = type_sequence_label_size
_UpperCamelCase: List[Any] = initializer_range
_UpperCamelCase: Dict = out_features
_UpperCamelCase: Union[str, Any] = num_labels
_UpperCamelCase: Tuple = scope
_UpperCamelCase: Union[str, Any] = num_stages
def lowerCAmelCase ( self : Optional[Any] ):
"""simple docstring"""
_UpperCamelCase: Any = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_UpperCamelCase: Any = None
if self.use_labels:
_UpperCamelCase: List[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_UpperCamelCase: Union[str, Any] = self.get_config()
return config, pixel_values, labels
def lowerCAmelCase ( self : Any ):
"""simple docstring"""
return ConvNextConfig(
num_channels=self.num_channels , num_stages=self.num_stages , hidden_sizes=self.hidden_sizes , depths=self.depths , is_training=self.is_training , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , out_features=self.out_features , )
def lowerCAmelCase ( self : Optional[Any] ):
"""simple docstring"""
return UperNetConfig(
backbone_config=self.get_backbone_config() , hidden_size=512 , pool_scales=[1, 2, 3, 6] , use_auxiliary_head=_lowercase , auxiliary_loss_weight=0.4 , auxiliary_in_channels=40 , auxiliary_channels=256 , auxiliary_num_convs=1 , auxiliary_concat_input=_lowercase , loss_ignore_index=255 , num_labels=self.num_labels , )
def lowerCAmelCase ( self : Tuple , _lowercase : Tuple , _lowercase : Optional[Any] , _lowercase : Dict ):
"""simple docstring"""
_UpperCamelCase: Optional[Any] = UperNetForSemanticSegmentation(config=_lowercase )
model.to(_lowercase )
model.eval()
_UpperCamelCase: int = model(_lowercase )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size, self.image_size) )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {'pixel_values': pixel_values}
        return config, inputs_dict
@require_torch
class __magic_name__ ( __a , __a , unittest.TestCase ):
"""simple docstring"""
lowerCAmelCase : List[Any] = (UperNetForSemanticSegmentation,) if is_torch_available() else ()
lowerCAmelCase : Tuple = {'''image-segmentation''': UperNetForSemanticSegmentation} if is_torch_available() else {}
lowerCAmelCase : Dict = False
lowerCAmelCase : str = False
lowerCAmelCase : Optional[Any] = False
lowerCAmelCase : Optional[Any] = False
lowerCAmelCase : List[Any] = False
lowerCAmelCase : Union[str, Any] = False
    def setUp(self):
        self.model_tester = UperNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=UperNetConfig, has_text_modality=False, hidden_size=37)
def lowerCAmelCase ( self : str ):
"""simple docstring"""
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def lowerCAmelCase ( self : List[Any] ):
"""simple docstring"""
return
def lowerCAmelCase ( self : List[str] ):
"""simple docstring"""
_UpperCamelCase , _UpperCamelCase: Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCamelCase: List[Any] = model_class(_lowercase )
_UpperCamelCase: Tuple = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_UpperCamelCase: Any = [*signature.parameters.keys()]
_UpperCamelCase: List[Any] = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , _lowercase )
def lowerCAmelCase ( self : Tuple ):
"""simple docstring"""
_UpperCamelCase: Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*_lowercase )
@unittest.skip(reason='''UperNet does not use inputs_embeds''' )
def lowerCAmelCase ( self : Tuple ):
"""simple docstring"""
pass
@unittest.skip(reason='''UperNet does not support input and output embeddings''' )
def lowerCAmelCase ( self : List[str] ):
"""simple docstring"""
pass
@unittest.skip(reason='''UperNet does not have a base model''' )
def lowerCAmelCase ( self : Any ):
"""simple docstring"""
pass
@unittest.skip(reason='''UperNet does not have a base model''' )
def lowerCAmelCase ( self : Optional[Any] ):
"""simple docstring"""
pass
@require_torch_multi_gpu
@unittest.skip(reason='''UperNet has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`''' )
def lowerCAmelCase ( self : Dict ):
"""simple docstring"""
pass
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def lowerCAmelCase ( self : Tuple ):
"""simple docstring"""
pass
def lowerCAmelCase ( self : Optional[Any] ):
"""simple docstring"""
def check_hidden_states_output(_lowercase : List[str] , _lowercase : List[str] , _lowercase : List[str] ):
_UpperCamelCase: Any = model_class(_lowercase )
model.to(_lowercase )
model.eval()
with torch.no_grad():
_UpperCamelCase: Union[str, Any] = model(**self._prepare_for_class(_lowercase , _lowercase ) )
_UpperCamelCase: Optional[Any] = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
_UpperCamelCase: List[str] = self.model_tester.num_stages
self.assertEqual(len(_lowercase ) , expected_num_stages + 1 )
# ConvNext's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
_UpperCamelCase , _UpperCamelCase: Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCamelCase: Optional[int] = True
check_hidden_states_output(_lowercase , _lowercase , _lowercase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_UpperCamelCase: Union[str, Any] = True
check_hidden_states_output(_lowercase , _lowercase , _lowercase )
def lowerCAmelCase ( self : Tuple ):
"""simple docstring"""
_UpperCamelCase , _UpperCamelCase: List[str] = self.model_tester.prepare_config_and_inputs_for_common()
_UpperCamelCase: int = _config_zero_init(_lowercase )
_UpperCamelCase: List[Any] = _config_zero_init(configs_no_init.backbone_config )
for model_class in self.all_model_classes:
_UpperCamelCase: int = model_class(config=_lowercase )
for name, param in model.named_parameters():
if param.requires_grad:
self.assertIn(
((param.data.mean() * 1E9).round() / 1E9).item() , [0.0, 1.0] , msg=f"""Parameter {name} of model {model_class} seems not properly initialized""" , )
@unittest.skip(reason='''UperNet does not have tied weights''' )
def lowerCAmelCase ( self : Tuple ):
"""simple docstring"""
pass
@slow
def lowerCAmelCase ( self : Optional[Any] ):
"""simple docstring"""
for model_name in UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_UpperCamelCase: Union[str, Any] = UperNetForSemanticSegmentation.from_pretrained(_lowercase )
self.assertIsNotNone(_lowercase )
def lowerCAmelCase_ ( ) -> Dict:
'''simple docstring'''
_UpperCamelCase: Any = hf_hub_download(
repo_id='''hf-internal-testing/fixtures_ade20k''' , repo_type='''dataset''' , filename='''ADE_val_00000001.jpg''' )
_UpperCamelCase: Any = Image.open(lowercase ).convert('''RGB''' )
return image
@require_torch
@require_vision
@slow
class __magic_name__ ( unittest.TestCase ):
"""simple docstring"""
def lowerCAmelCase ( self : Optional[Any] ):
"""simple docstring"""
_UpperCamelCase: Any = AutoImageProcessor.from_pretrained('''openmmlab/upernet-swin-tiny''' )
_UpperCamelCase: Optional[Any] = UperNetForSemanticSegmentation.from_pretrained('''openmmlab/upernet-swin-tiny''' ).to(_lowercase )
_UpperCamelCase: List[Any] = prepare_img()
_UpperCamelCase: int = processor(images=_lowercase , return_tensors='''pt''' ).to(_lowercase )
with torch.no_grad():
_UpperCamelCase: Any = model(**_lowercase )
_UpperCamelCase: Any = torch.Size((1, model.config.num_labels, 512, 512) )
self.assertEqual(outputs.logits.shape , _lowercase )
_UpperCamelCase: Tuple = torch.tensor(
[[-7.5958, -7.5958, -7.4302], [-7.5958, -7.5958, -7.4302], [-7.4797, -7.4797, -7.3068]] ).to(_lowercase )
self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3] , _lowercase , atol=1E-4 ) )
def lowerCAmelCase ( self : int ):
"""simple docstring"""
_UpperCamelCase: Tuple = AutoImageProcessor.from_pretrained('''openmmlab/upernet-convnext-tiny''' )
_UpperCamelCase: int = UperNetForSemanticSegmentation.from_pretrained('''openmmlab/upernet-convnext-tiny''' ).to(_lowercase )
_UpperCamelCase: int = prepare_img()
_UpperCamelCase: Union[str, Any] = processor(images=_lowercase , return_tensors='''pt''' ).to(_lowercase )
with torch.no_grad():
_UpperCamelCase: List[Any] = model(**_lowercase )
_UpperCamelCase: Tuple = torch.Size((1, model.config.num_labels, 512, 512) )
self.assertEqual(outputs.logits.shape , _lowercase )
_UpperCamelCase: Optional[Any] = torch.tensor(
[[-8.8110, -8.8110, -8.6521], [-8.8110, -8.8110, -8.6521], [-8.7746, -8.7746, -8.6130]] ).to(_lowercase )
self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3] , _lowercase , atol=1E-4 ) )
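# Illustrative post-processing sketch (an addition, not part of the test file above): the
# integration tests check raw logits; turning them into a per-pixel label map is a plain
# argmax over the class dimension. The random tensor below stands in for outputs.logits
# (the ADE20k checkpoints above predict 150 classes); resizing back to the original image
# size is left out for brevity.
import torch

logits = torch.randn(1, 150, 512, 512)      # stand-in for outputs.logits
segmentation_map = logits.argmax(dim=1)[0]  # (512, 512) tensor of class indices
print(segmentation_map.shape)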
| 264
| 0
|
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
class __lowerCAmelCase ( UpperCAmelCase_ ):
"""simple docstring"""
A__ : int = ["input_features"]
def __init__( self : Dict , _snake_case : str=80 , _snake_case : List[Any]=1_60_00 , _snake_case : str=1_60 , _snake_case : Any=30 , _snake_case : Dict=4_00 , _snake_case : int=0.0 , _snake_case : Tuple=False , **_snake_case : Optional[Any] , ):
"""simple docstring"""
super().__init__(
feature_size=_snake_case , sampling_rate=_snake_case , padding_value=_snake_case , return_attention_mask=_snake_case , **_snake_case , )
A__ = n_fft
A__ = hop_length
A__ = chunk_length
A__ = chunk_length * sampling_rate
A__ = self.n_samples // hop_length
A__ = sampling_rate
A__ = mel_filter_bank(
num_frequency_bins=1 + n_fft // 2 , num_mel_filters=_snake_case , min_frequency=0.0 , max_frequency=8000.0 , sampling_rate=_snake_case , norm='slaney' , mel_scale='slaney' , )
def _a ( self : List[str] , _snake_case : np.array ):
"""simple docstring"""
A__ = spectrogram(
_snake_case , window_function(self.n_fft , 'hann' ) , frame_length=self.n_fft , hop_length=self.hop_length , power=2.0 , mel_filters=self.mel_filters , log_mel='log10' , )
A__ = log_spec[:, :-1]
A__ = np.maximum(_snake_case , log_spec.max() - 8.0 )
A__ = (log_spec + 4.0) / 4.0
return log_spec
@staticmethod
# Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm
def _a ( _snake_case : List[np.ndarray] , _snake_case : List[np.ndarray] , _snake_case : float = 0.0 ):
"""simple docstring"""
if attention_mask is not None:
A__ = np.array(_snake_case , np.intaa )
A__ = []
for vector, length in zip(_snake_case , attention_mask.sum(-1 ) ):
A__ = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1E-7 )
if length < normed_slice.shape[0]:
A__ = padding_value
normed_input_values.append(_snake_case )
else:
A__ = [(x - x.mean()) / np.sqrt(x.var() + 1E-7 ) for x in input_values]
return normed_input_values
def __call__( self : str , _snake_case : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , _snake_case : bool = True , _snake_case : Optional[int] = None , _snake_case : Optional[Union[str, TensorType]] = None , _snake_case : Optional[bool] = None , _snake_case : Optional[str] = "max_length" , _snake_case : Optional[int] = None , _snake_case : Optional[int] = None , _snake_case : Optional[bool] = None , **_snake_case : Optional[int] , ):
"""simple docstring"""
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
F'''The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a'''
F''' sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input'''
F''' was sampled with {self.sampling_rate} and not {sampling_rate}.''' )
else:
logger.warning(
'It is strongly recommended to pass the `sampling_rate` argument to this function. '
'Failing to do so can result in silent errors that might be hard to debug.' )
A__ = isinstance(_snake_case , np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(F'''Only mono-channel audio is supported for input to {self}''' )
A__ = is_batched_numpy or (
isinstance(_snake_case , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
)
if is_batched:
A__ = [np.asarray([speech] , dtype=np.floataa ).T for speech in raw_speech]
elif not is_batched and not isinstance(_snake_case , np.ndarray ):
A__ = np.asarray(_snake_case , dtype=np.floataa )
elif isinstance(_snake_case , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
A__ = raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
A__ = [np.asarray([raw_speech] ).T]
A__ = BatchFeature({'input_features': raw_speech} )
# convert into correct format for padding
A__ = self.pad(
_snake_case , padding=_snake_case , max_length=max_length if max_length else self.n_samples , truncation=_snake_case , pad_to_multiple_of=_snake_case , return_attention_mask=return_attention_mask or do_normalize , )
# zero-mean and unit-variance normalization
if do_normalize:
A__ = self.zero_mean_unit_var_norm(
padded_inputs['input_features'] , attention_mask=padded_inputs['attention_mask'] , padding_value=self.padding_value , )
A__ = np.stack(padded_inputs['input_features'] , axis=0 )
# make sure list is in array format
A__ = padded_inputs.get('input_features' ).transpose(2 , 0 , 1 )
A__ = [self._np_extract_fbank_features(_snake_case ) for waveform in input_features[0]]
if isinstance(input_features[0] , _snake_case ):
A__ = [np.asarray(_snake_case , dtype=np.floataa ) for feature in input_features]
else:
A__ = input_features
if return_attention_mask:
# rescale from sample (48000) to feature (3000)
A__ = padded_inputs['attention_mask'][:, :: self.hop_length]
if return_tensors is not None:
A__ = padded_inputs.convert_to_tensors(_snake_case )
return padded_inputs
def _a ( self : Optional[Any] ):
"""simple docstring"""
A__ = copy.deepcopy(self.__dict__ )
A__ = self.__class__.__name__
if "mel_filters" in output:
del output["mel_filters"]
return output
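# Illustrative sketch (an addition): exercising the log-mel pipeline used by the feature
# extractor above on a dummy waveform. The helper functions are assumed to come from
# transformers.audio_utils, matching the relative imports above; the parameter values
# mirror the class defaults (n_fft=400, hop_length=160, 80 mel bins, 16 kHz audio).
import numpy as np
from transformers.audio_utils import mel_filter_bank, spectrogram, window_function

sampling_rate, n_fft, hop_length, num_mel_bins = 16_000, 400, 160, 80
waveform = np.random.randn(sampling_rate).astype(np.float32)  # one second of dummy audio
mel_filters = mel_filter_bank(
    num_frequency_bins=1 + n_fft // 2,
    num_mel_filters=num_mel_bins,
    min_frequency=0.0,
    max_frequency=8000.0,
    sampling_rate=sampling_rate,
    norm="slaney",
    mel_scale="slaney",
)
log_spec = spectrogram(
    waveform,
    window_function(n_fft, "hann"),
    frame_length=n_fft,
    hop_length=hop_length,
    power=2.0,
    mel_filters=mel_filters,
    log_mel="log10",
)
print(log_spec.shape)  # (num_mel_bins, num_frames)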
| 9
|
'''simple docstring'''
import gc
import unittest
import torch
from parameterized import parameterized
from diffusers import AutoencoderKL
from diffusers.utils import floats_tensor, load_hf_numpy, require_torch_gpu, slow, torch_all_close, torch_device
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
enable_full_determinism()
class __A ( UpperCamelCase__ , UpperCamelCase__ , unittest.TestCase ):
a__ : int = AutoencoderKL
a__ : Optional[Any] = """sample"""
a__ : Union[str, Any] = 1e-2
@property
def _lowercase (self : Optional[int] ):
UpperCAmelCase_ = 4
UpperCAmelCase_ = 3
UpperCAmelCase_ = (32, 32)
UpperCAmelCase_ = floats_tensor((batch_size, num_channels) + sizes ).to(__a )
return {"sample": image}
@property
def _lowercase (self : Any ):
return (3, 32, 32)
@property
def _lowercase (self : Dict ):
return (3, 32, 32)
def _lowercase (self : int ):
UpperCAmelCase_ = {
"block_out_channels": [32, 64],
"in_channels": 3,
"out_channels": 3,
"down_block_types": ["DownEncoderBlock2D", "DownEncoderBlock2D"],
"up_block_types": ["UpDecoderBlock2D", "UpDecoderBlock2D"],
"latent_channels": 4,
}
UpperCAmelCase_ = self.dummy_input
return init_dict, inputs_dict
def _lowercase (self : int ):
pass
def _lowercase (self : int ):
pass
@unittest.skipIf(torch_device == "mps" , "Gradient checkpointing skipped on MPS" )
def _lowercase (self : List[Any] ):
# enable deterministic behavior for gradient checkpointing
UpperCAmelCase_ , UpperCAmelCase_ = self.prepare_init_args_and_inputs_for_common()
UpperCAmelCase_ = self.model_class(**__a )
model.to(__a )
assert not model.is_gradient_checkpointing and model.training
UpperCAmelCase_ = model(**__a ).sample
# run the backwards pass on the model. For backwards pass, for simplicity purpose,
# we won't calculate the loss and rather backprop on out.sum()
model.zero_grad()
UpperCAmelCase_ = torch.randn_like(__a )
UpperCAmelCase_ = (out - labels).mean()
loss.backward()
# re-instantiate the model now enabling gradient checkpointing
UpperCAmelCase_ = self.model_class(**__a )
# clone model
model_a.load_state_dict(model.state_dict() )
model_a.to(__a )
model_a.enable_gradient_checkpointing()
assert model_a.is_gradient_checkpointing and model_a.training
UpperCAmelCase_ = model_a(**__a ).sample
# run the backwards pass on the model. For backwards pass, for simplicity purpose,
# we won't calculate the loss and rather backprop on out.sum()
model_a.zero_grad()
UpperCAmelCase_ = (out_a - labels).mean()
loss_a.backward()
# compare the output and parameters gradients
self.assertTrue((loss - loss_a).abs() < 1E-5 )
UpperCAmelCase_ = dict(model.named_parameters() )
UpperCAmelCase_ = dict(model_a.named_parameters() )
for name, param in named_params.items():
self.assertTrue(torch_all_close(param.grad.data , named_params_a[name].grad.data , atol=5E-5 ) )
def _lowercase (self : Any ):
UpperCAmelCase_ , UpperCAmelCase_ = AutoencoderKL.from_pretrained("fusing/autoencoder-kl-dummy" , output_loading_info=__a )
self.assertIsNotNone(__a )
self.assertEqual(len(loading_info["missing_keys"] ) , 0 )
model.to(__a )
UpperCAmelCase_ = model(**self.dummy_input )
assert image is not None, "Make sure output is not None"
def _lowercase (self : List[str] ):
UpperCAmelCase_ = AutoencoderKL.from_pretrained("fusing/autoencoder-kl-dummy" )
UpperCAmelCase_ = model.to(__a )
model.eval()
if torch_device == "mps":
UpperCAmelCase_ = torch.manual_seed(0 )
else:
UpperCAmelCase_ = torch.Generator(device=__a ).manual_seed(0 )
UpperCAmelCase_ = torch.randn(
1 , model.config.in_channels , model.config.sample_size , model.config.sample_size , generator=torch.manual_seed(0 ) , )
UpperCAmelCase_ = image.to(__a )
with torch.no_grad():
UpperCAmelCase_ = model(__a , sample_posterior=__a , generator=__a ).sample
UpperCAmelCase_ = output[0, -1, -3:, -3:].flatten().cpu()
# Since the VAE Gaussian prior's generator is seeded on the appropriate device,
# the expected output slices are not the same for CPU and GPU.
if torch_device == "mps":
UpperCAmelCase_ = torch.tensor(
[
-4.0078E-01,
-3.8323E-04,
-1.2681E-01,
-1.1462E-01,
2.0095E-01,
1.0893E-01,
-8.8247E-02,
-3.0361E-01,
-9.8644E-03,
] )
elif torch_device == "cpu":
UpperCAmelCase_ = torch.tensor(
[-0.13_52, 0.08_78, 0.04_19, -0.08_18, -0.10_69, 0.06_88, -0.14_58, -0.44_46, -0.00_26] )
else:
UpperCAmelCase_ = torch.tensor(
[-0.24_21, 0.46_42, 0.25_07, -0.04_38, 0.06_82, 0.31_60, -0.20_18, -0.07_27, 0.24_85] )
self.assertTrue(torch_all_close(__a , __a , rtol=1E-2 ) )
@slow
class __A ( unittest.TestCase ):
def _lowercase (self : Dict , __a : Dict , __a : int ):
return f"""gaussian_noise_s={seed}_shape={"_".join([str(__a ) for s in shape] )}.npy"""
def _lowercase (self : str ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _lowercase (self : Optional[Any] , __a : Optional[Any]=0 , __a : str=(4, 3, 512, 512) , __a : List[str]=False ):
UpperCAmelCase_ = torch.floataa if fpaa else torch.floataa
UpperCAmelCase_ = torch.from_numpy(load_hf_numpy(self.get_file_format(__a , __a ) ) ).to(__a ).to(__a )
return image
def _lowercase (self : List[Any] , __a : Union[str, Any]="CompVis/stable-diffusion-v1-4" , __a : List[Any]=False ):
UpperCAmelCase_ = "fp16" if fpaa else None
UpperCAmelCase_ = torch.floataa if fpaa else torch.floataa
UpperCAmelCase_ = AutoencoderKL.from_pretrained(
__a , subfolder="vae" , torch_dtype=__a , revision=__a , )
model.to(__a ).eval()
return model
def _lowercase (self : List[Any] , __a : List[Any]=0 ):
if torch_device == "mps":
return torch.manual_seed(__a )
return torch.Generator(device=__a ).manual_seed(__a )
@parameterized.expand(
[
# fmt: off
[33, [-0.16_03, 0.98_78, -0.04_95, -0.07_90, -0.27_09, 0.83_75, -0.20_60, -0.08_24], [-0.23_95, 0.00_98, 0.01_02, -0.07_09, -0.28_40, -0.02_74, -0.07_18, -0.18_24]],
[47, [-0.23_76, 0.11_68, 0.13_32, -0.48_40, -0.25_08, -0.07_91, -0.04_93, -0.40_89], [0.03_50, 0.08_47, 0.04_67, 0.03_44, -0.08_42, -0.05_47, -0.06_33, -0.11_31]],
# fmt: on
] )
def _lowercase (self : List[Any] , __a : Dict , __a : Optional[int] , __a : List[str] ):
UpperCAmelCase_ = self.get_sd_vae_model()
UpperCAmelCase_ = self.get_sd_image(__a )
UpperCAmelCase_ = self.get_generator(__a )
with torch.no_grad():
UpperCAmelCase_ = model(__a , generator=__a , sample_posterior=__a ).sample
assert sample.shape == image.shape
UpperCAmelCase_ = sample[-1, -2:, -2:, :2].flatten().float().cpu()
UpperCAmelCase_ = torch.tensor(expected_slice_mps if torch_device == "mps" else expected_slice )
assert torch_all_close(__a , __a , atol=3E-3 )
@parameterized.expand(
[
# fmt: off
[33, [-0.05_13, 0.02_89, 1.37_99, 0.21_66, -0.25_73, -0.08_71, 0.51_03, -0.09_99]],
[47, [-0.41_28, -0.13_20, -0.37_04, 0.19_65, -0.41_16, -0.23_32, -0.33_40, 0.22_47]],
# fmt: on
] )
@require_torch_gpu
def _lowercase (self : Dict , __a : Optional[int] , __a : int ):
UpperCAmelCase_ = self.get_sd_vae_model(fpaa=__a )
UpperCAmelCase_ = self.get_sd_image(__a , fpaa=__a )
UpperCAmelCase_ = self.get_generator(__a )
with torch.no_grad():
UpperCAmelCase_ = model(__a , generator=__a , sample_posterior=__a ).sample
assert sample.shape == image.shape
UpperCAmelCase_ = sample[-1, -2:, :2, -2:].flatten().float().cpu()
UpperCAmelCase_ = torch.tensor(__a )
assert torch_all_close(__a , __a , atol=1E-2 )
@parameterized.expand(
[
# fmt: off
[33, [-0.16_09, 0.98_66, -0.04_87, -0.07_77, -0.27_16, 0.83_68, -0.20_55, -0.08_14], [-0.23_95, 0.00_98, 0.01_02, -0.07_09, -0.28_40, -0.02_74, -0.07_18, -0.18_24]],
[47, [-0.23_77, 0.11_47, 0.13_33, -0.48_41, -0.25_06, -0.08_05, -0.04_91, -0.40_85], [0.03_50, 0.08_47, 0.04_67, 0.03_44, -0.08_42, -0.05_47, -0.06_33, -0.11_31]],
# fmt: on
] )
def _lowercase (self : str , __a : int , __a : Union[str, Any] , __a : List[Any] ):
UpperCAmelCase_ = self.get_sd_vae_model()
UpperCAmelCase_ = self.get_sd_image(__a )
with torch.no_grad():
UpperCAmelCase_ = model(__a ).sample
assert sample.shape == image.shape
UpperCAmelCase_ = sample[-1, -2:, -2:, :2].flatten().float().cpu()
UpperCAmelCase_ = torch.tensor(expected_slice_mps if torch_device == "mps" else expected_slice )
assert torch_all_close(__a , __a , atol=3E-3 )
@parameterized.expand(
[
# fmt: off
[13, [-0.20_51, -0.18_03, -0.23_11, -0.21_14, -0.32_92, -0.35_74, -0.29_53, -0.33_23]],
[37, [-0.26_32, -0.26_25, -0.21_99, -0.27_41, -0.45_39, -0.49_90, -0.37_20, -0.49_25]],
# fmt: on
] )
@require_torch_gpu
def _lowercase (self : int , __a : int , __a : int ):
UpperCAmelCase_ = self.get_sd_vae_model()
UpperCAmelCase_ = self.get_sd_image(__a , shape=(3, 4, 64, 64) )
with torch.no_grad():
UpperCAmelCase_ = model.decode(__a ).sample
assert list(sample.shape ) == [3, 3, 512, 512]
UpperCAmelCase_ = sample[-1, -2:, :2, -2:].flatten().cpu()
UpperCAmelCase_ = torch.tensor(__a )
assert torch_all_close(__a , __a , atol=1E-3 )
@parameterized.expand(
[
# fmt: off
[27, [-0.03_69, 0.02_07, -0.07_76, -0.06_82, -0.17_47, -0.19_30, -0.14_65, -0.20_39]],
[16, [-0.16_28, -0.21_34, -0.27_47, -0.26_42, -0.37_74, -0.44_04, -0.36_87, -0.42_77]],
# fmt: on
] )
@require_torch_gpu
def _lowercase (self : Union[str, Any] , __a : List[str] , __a : Optional[Any] ):
UpperCAmelCase_ = self.get_sd_vae_model(fpaa=__a )
UpperCAmelCase_ = self.get_sd_image(__a , shape=(3, 4, 64, 64) , fpaa=__a )
with torch.no_grad():
UpperCAmelCase_ = model.decode(__a ).sample
assert list(sample.shape ) == [3, 3, 512, 512]
UpperCAmelCase_ = sample[-1, -2:, :2, -2:].flatten().float().cpu()
UpperCAmelCase_ = torch.tensor(__a )
assert torch_all_close(__a , __a , atol=5E-3 )
@parameterized.expand([(13,), (16,), (27,)] )
@require_torch_gpu
@unittest.skipIf(not is_xformers_available() , reason="xformers is not required when using PyTorch 2.0." )
def _lowercase (self : List[str] , __a : int ):
UpperCAmelCase_ = self.get_sd_vae_model(fpaa=__a )
UpperCAmelCase_ = self.get_sd_image(__a , shape=(3, 4, 64, 64) , fpaa=__a )
with torch.no_grad():
UpperCAmelCase_ = model.decode(__a ).sample
model.enable_xformers_memory_efficient_attention()
with torch.no_grad():
UpperCAmelCase_ = model.decode(__a ).sample
assert list(sample.shape ) == [3, 3, 512, 512]
assert torch_all_close(__a , __a , atol=1E-1 )
@parameterized.expand([(13,), (16,), (37,)] )
@require_torch_gpu
@unittest.skipIf(not is_xformers_available() , reason="xformers is not required when using PyTorch 2.0." )
def _lowercase (self : Union[str, Any] , __a : Dict ):
UpperCAmelCase_ = self.get_sd_vae_model()
UpperCAmelCase_ = self.get_sd_image(__a , shape=(3, 4, 64, 64) )
with torch.no_grad():
UpperCAmelCase_ = model.decode(__a ).sample
model.enable_xformers_memory_efficient_attention()
with torch.no_grad():
UpperCAmelCase_ = model.decode(__a ).sample
assert list(sample.shape ) == [3, 3, 512, 512]
assert torch_all_close(__a , __a , atol=1E-2 )
@parameterized.expand(
[
# fmt: off
[33, [-0.30_01, 0.09_18, -2.69_84, -3.97_20, -3.20_99, -5.03_53, 1.73_38, -0.20_65, 3.42_67]],
[47, [-1.50_30, -4.38_71, -6.03_55, -9.11_57, -1.66_61, -2.78_53, 2.16_07, -5.08_23, 2.56_33]],
# fmt: on
] )
def _lowercase (self : Tuple , __a : List[Any] , __a : List[Any] ):
UpperCAmelCase_ = self.get_sd_vae_model()
UpperCAmelCase_ = self.get_sd_image(__a )
UpperCAmelCase_ = self.get_generator(__a )
with torch.no_grad():
UpperCAmelCase_ = model.encode(__a ).latent_dist
UpperCAmelCase_ = dist.sample(generator=__a )
assert list(sample.shape ) == [image.shape[0], 4] + [i // 8 for i in image.shape[2:]]
UpperCAmelCase_ = sample[0, -1, -3:, -3:].flatten().cpu()
UpperCAmelCase_ = torch.tensor(__a )
UpperCAmelCase_ = 3E-3 if torch_device != "mps" else 1E-2
assert torch_all_close(__a , __a , atol=__a )
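# Minimal encode/decode round-trip sketch (an addition), reusing the tiny config from the
# dummy model in the unit tests above. The exact latent spatial size depends on the number
# of down blocks, so shapes are printed rather than asserted.
import torch
from diffusers import AutoencoderKL

vae = AutoencoderKL(
    block_out_channels=[32, 64],
    in_channels=3,
    out_channels=3,
    down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
    up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
    latent_channels=4,
)
images = torch.randn(2, 3, 32, 32)
with torch.no_grad():
    latents = vae.encode(images).latent_dist.sample()
    reconstruction = vae.decode(latents).sample
print(latents.shape, reconstruction.shape)  # e.g. [2, 4, 16, 16] and [2, 3, 32, 32]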
| 78
| 0
|
'''simple docstring'''
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import doctest
import sys
import warnings
from os.path import abspath, dirname, join
import _pytest
from transformers.testing_utils import HfDoctestModule, HfDocTestParser
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
A__ : Union[str, Any] =abspath(join(dirname(__file__), 'src'))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action='ignore', category=FutureWarning)
def A_ ( __SCREAMING_SNAKE_CASE : Optional[int] ) -> int:
"""simple docstring"""
config.addinivalue_line(
"""markers""" , """is_pt_tf_cross_test: mark test to run only when PT and TF interactions are tested""" )
config.addinivalue_line(
"""markers""" , """is_pt_flax_cross_test: mark test to run only when PT and FLAX interactions are tested""" )
config.addinivalue_line("""markers""" , """is_pipeline_test: mark test to run only when pipelines are tested""" )
config.addinivalue_line("""markers""" , """is_staging_test: mark test to run only in the staging environment""" )
config.addinivalue_line("""markers""" , """accelerate_tests: mark test that require accelerate""" )
config.addinivalue_line("""markers""" , """tool_tests: mark the tool tests that are run on their specific schedule""" )
def A_ ( __SCREAMING_SNAKE_CASE : Optional[int] ) -> List[Any]:
"""simple docstring"""
from transformers.testing_utils import pytest_addoption_shared
pytest_addoption_shared(A__ )
def A_ ( __SCREAMING_SNAKE_CASE : List[str] ) -> Optional[Any]:
"""simple docstring"""
from transformers.testing_utils import pytest_terminal_summary_main
__A : Union[str, Any] = terminalreporter.config.getoption("""--make-reports""" )
if make_reports:
pytest_terminal_summary_main(A__ , id=A__ )
def A_ ( __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Tuple ) -> List[Any]:
"""simple docstring"""
if exitstatus == 5:
__A : Any = 0
# Doctest custom flag to ignore output.
A__ : Tuple =doctest.register_optionflag('IGNORE_RESULT')
A__ : List[Any] =doctest.OutputChecker
class __A ( __lowerCamelCase ):
def lowercase_( self : Optional[int] , lowerCamelCase : str , lowerCamelCase : Tuple , lowerCamelCase : Any ):
"""simple docstring"""
if IGNORE_RESULT & optionflags:
return True
return OutputChecker.check_output(self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
A__ : List[Any] =CustomOutputChecker
A__ : Dict =HfDoctestModule
A__ : int =HfDocTestParser
| 714
|
'''simple docstring'''
from packaging import version
from .import_utils import is_accelerate_available
if is_accelerate_available():
import accelerate
def A_ ( __SCREAMING_SNAKE_CASE : Dict ) -> Optional[int]:
"""simple docstring"""
if not is_accelerate_available():
return method
__A : int = version.parse(accelerate.__version__ ).base_version
if version.parse(__SCREAMING_SNAKE_CASE ) < version.parse("""0.17.0""" ):
return method
def wrapper(self : str , *__SCREAMING_SNAKE_CASE : Tuple , **__SCREAMING_SNAKE_CASE : Tuple ):
if hasattr(self , """_hf_hook""" ) and hasattr(self._hf_hook , """pre_forward""" ):
self._hf_hook.pre_forward(self )
return method(self , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
return wrapper
| 499
| 0
|
"""simple docstring"""
import random
import unittest
import torch
from diffusers import IFInpaintingSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class __magic_name__ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
UpperCamelCase_ = IFInpaintingSuperResolutionPipeline
UpperCamelCase_ = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {'''width''', '''height'''}
UpperCamelCase_ = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS.union({'''original_image'''} )
UpperCamelCase_ = PipelineTesterMixin.required_optional_params - {'''latents'''}
def lowercase_ ( self ) -> List[str]:
"""simple docstring"""
return self._get_superresolution_dummy_components()
def lowercase_ ( self , A_ , A_=0 ) -> Optional[int]:
"""simple docstring"""
if str(A_ ).startswith('''mps''' ):
_lowercase: Dict = torch.manual_seed(A_ )
else:
_lowercase: Union[str, Any] = torch.Generator(device=A_ ).manual_seed(A_ )
_lowercase: List[Any] = floats_tensor((1, 3, 16, 16) , rng=random.Random(A_ ) ).to(A_ )
_lowercase: List[Any] = floats_tensor((1, 3, 32, 32) , rng=random.Random(A_ ) ).to(A_ )
_lowercase: int = floats_tensor((1, 3, 32, 32) , rng=random.Random(A_ ) ).to(A_ )
_lowercase: Dict = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''image''': image,
'''original_image''': original_image,
'''mask_image''': mask_image,
'''generator''': generator,
'''num_inference_steps''': 2,
'''output_type''': '''numpy''',
}
return inputs
@unittest.skipIf(
torch_device != '''cuda''' or not is_xformers_available() , reason='''XFormers attention is only available with CUDA and `xformers` installed''' , )
def lowercase_ ( self ) -> List[Any]:
"""simple docstring"""
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 )
def lowercase_ ( self ) -> Tuple:
"""simple docstring"""
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != '''cuda''' , reason='''float16 requires CUDA''' )
def lowercase_ ( self ) -> Any:
"""simple docstring"""
super().test_save_load_floataa(expected_max_diff=1E-1 )
def lowercase_ ( self ) -> Any:
"""simple docstring"""
self._test_attention_slicing_forward_pass(expected_max_diff=1E-2 )
def lowercase_ ( self ) -> str:
"""simple docstring"""
self._test_save_load_local()
def lowercase_ ( self ) -> Any:
"""simple docstring"""
self._test_inference_batch_single_identical(
expected_max_diff=1E-2 , )
| 353
|
"""simple docstring"""
import numpy as np
import torch
from torch.utils.data import Dataset
from utils import logger
class __magic_name__ ( SCREAMING_SNAKE_CASE__ ):
def __init__( self , A_ , A_ ) -> List[str]:
"""simple docstring"""
_lowercase: List[str] = params
_lowercase: str = np.array(A_ )
_lowercase: Optional[int] = np.array([len(A_ ) for t in data] )
self.check()
self.remove_long_sequences()
self.remove_empty_sequences()
self.remove_unknown_sequences()
self.check()
self.print_statistics()
def __getitem__( self , A_ ) -> Optional[Any]:
"""simple docstring"""
return (self.token_ids[index], self.lengths[index])
def __len__( self ) -> List[str]:
"""simple docstring"""
return len(self.lengths )
def lowercase_ ( self ) -> Dict:
"""simple docstring"""
assert len(self.token_ids ) == len(self.lengths )
assert all(self.lengths[i] == len(self.token_ids[i] ) for i in range(len(self.lengths ) ) )
def lowercase_ ( self ) -> str:
"""simple docstring"""
_lowercase: Tuple = self.params.max_model_input_size
_lowercase: Tuple = self.lengths > max_len
logger.info(f'''Splitting {sum(A_ )} too long sequences.''' )
def divide_chunks(A_ , A_ ):
return [l[i : i + n] for i in range(0 , len(A_ ) , A_ )]
_lowercase: Dict = []
_lowercase: Union[str, Any] = []
if self.params.mlm:
_lowercase , _lowercase: int = self.params.special_tok_ids['''cls_token'''], self.params.special_tok_ids['''sep_token''']
else:
_lowercase , _lowercase: Dict = self.params.special_tok_ids['''bos_token'''], self.params.special_tok_ids['''eos_token''']
for seq_, len_ in zip(self.token_ids , self.lengths ):
assert (seq_[0] == cls_id) and (seq_[-1] == sep_id), seq_
if len_ <= max_len:
new_tok_ids.append(seq_ )
new_lengths.append(len_ )
else:
_lowercase: Optional[Any] = []
for sub_s in divide_chunks(seq_ , max_len - 2 ):
if sub_s[0] != cls_id:
_lowercase: Any = np.insert(A_ , 0 , A_ )
if sub_s[-1] != sep_id:
_lowercase: Optional[int] = np.insert(A_ , len(A_ ) , A_ )
assert len(A_ ) <= max_len
assert (sub_s[0] == cls_id) and (sub_s[-1] == sep_id), sub_s
sub_seqs.append(A_ )
new_tok_ids.extend(A_ )
new_lengths.extend([len(A_ ) for l in sub_seqs] )
_lowercase: Optional[Any] = np.array(A_ )
_lowercase: List[str] = np.array(A_ )
def lowercase_ ( self ) -> Tuple:
"""simple docstring"""
_lowercase: List[Any] = len(self )
_lowercase: Optional[Any] = self.lengths > 11
_lowercase: int = self.token_ids[indices]
_lowercase: List[str] = self.lengths[indices]
_lowercase: Optional[int] = len(self )
logger.info(f'''Remove {init_size - new_size} too short (<=11 tokens) sequences.''' )
def lowercase_ ( self ) -> int:
"""simple docstring"""
if "unk_token" not in self.params.special_tok_ids:
return
else:
_lowercase: Dict = self.params.special_tok_ids['''unk_token''']
_lowercase: int = len(self )
_lowercase: Tuple = np.array([np.count_nonzero(a == unk_token_id ) for a in self.token_ids] )
_lowercase: Dict = (unk_occs / self.lengths) < 0.5
_lowercase: str = self.token_ids[indices]
_lowercase: Dict = self.lengths[indices]
_lowercase: int = len(self )
logger.info(f'''Remove {init_size - new_size} sequences with a high level of unknown tokens (50%).''' )
def lowercase_ ( self ) -> Optional[Any]:
"""simple docstring"""
if not self.params.is_master:
return
logger.info(f'''{len(self )} sequences''' )
# data_len = sum(self.lengths)
# nb_unique_tokens = len(Counter(list(chain(*self.token_ids))))
# logger.info(f'{data_len} tokens ({nb_unique_tokens} unique)')
# unk_idx = self.params.special_tok_ids['unk_token']
# nb_unknown = sum([(t==unk_idx).sum() for t in self.token_ids])
# logger.info(f'{nb_unknown} unknown tokens (covering {100*nb_unknown/data_len:.2f}% of the data)')
def lowercase_ ( self , A_ ) -> Optional[int]:
"""simple docstring"""
_lowercase: List[str] = [t[0] for t in batch]
_lowercase: Dict = [t[1] for t in batch]
assert len(A_ ) == len(A_ )
# Max for paddings
_lowercase: Tuple = max(A_ )
# Pad token ids
if self.params.mlm:
_lowercase: str = self.params.special_tok_ids['''pad_token''']
else:
_lowercase: Union[str, Any] = self.params.special_tok_ids['''unk_token''']
_lowercase: int = [list(t.astype(A_ ) ) + [pad_idx] * (max_seq_len_ - len(A_ )) for t in token_ids]
assert len(tk_ ) == len(A_ )
assert all(len(A_ ) == max_seq_len_ for t in tk_ )
_lowercase: str = torch.tensor(tk_ ) # (bs, max_seq_len_)
_lowercase: int = torch.tensor(A_ ) # (bs)
return tk_t, lg_t
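# Stand-alone sketch of the collate step above (an addition): sequences of token ids are
# right-padded to the longest sequence in the batch before being turned into tensors.
# pad_idx=0 is an assumption; the class above reads it from params.special_tok_ids.
import torch

def pad_and_tensorize(token_ids, pad_idx=0):
    lengths = [len(t) for t in token_ids]
    max_len = max(lengths)
    padded = [list(t) + [pad_idx] * (max_len - len(t)) for t in token_ids]
    return torch.tensor(padded), torch.tensor(lengths)  # (bs, max_len), (bs,)

batch = [[101, 7, 8, 102], [101, 9, 102]]
tokens, lengths = pad_and_tensorize(batch)
print(tokens.shape, lengths.tolist())  # torch.Size([2, 4]) [4, 3]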
| 353
| 1
|
"""simple docstring"""
from __future__ import annotations
def A( snake_case_ , snake_case_ , snake_case_ ):
"""simple docstring"""
if len(_lowerCamelCase ) == 0:
raise ValueError("find_max() arg is an empty sequence" )
if (
left >= len(_lowerCamelCase )
or left < -len(_lowerCamelCase )
or right >= len(_lowerCamelCase )
or right < -len(_lowerCamelCase )
):
raise IndexError("list index out of range" )
if left == right:
return nums[left]
lowercase__: Optional[int] = (left + right) >> 1 # the middle
lowercase__: List[Any] = find_max(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) # find max in range[left, mid]
lowercase__: Optional[int] = find_max(_lowerCamelCase , mid + 1 , _lowerCamelCase ) # find max in range[mid + 1, right]
return left_max if left_max >= right_max else right_max
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
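# Self-contained sketch of the divide-and-conquer maximum above (an addition, written out
# with readable names because the row above uses placeholder identifiers). Assumes a
# non-empty list and in-range indices, mirroring the checks in the original.
def find_max_dc(nums, left, right):
    if left == right:
        return nums[left]
    mid = (left + right) // 2                      # split the range in half
    left_max = find_max_dc(nums, left, mid)        # maximum of nums[left..mid]
    right_max = find_max_dc(nums, mid + 1, right)  # maximum of nums[mid+1..right]
    return left_max if left_max >= right_max else right_max

values = [3, -1, 7, 2, 7, 0]
assert find_max_dc(values, 0, len(values) - 1) == max(values) == 7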
| 704
|
"""simple docstring"""
class _a :
'''simple docstring'''
def __init__( self) -> Union[str, Any]:
'''simple docstring'''
lowercase__: Union[str, Any] = 0
lowercase__: Optional[Any] = 0
lowercase__: Any = {}
def __lowercase ( self , UpperCAmelCase_) -> Tuple:
'''simple docstring'''
if vertex not in self.adjacency:
lowercase__: int = {}
self.num_vertices += 1
def __lowercase ( self , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_) -> str:
'''simple docstring'''
self.add_vertex(UpperCAmelCase_)
self.add_vertex(UpperCAmelCase_)
if head == tail:
return
lowercase__: List[str] = weight
lowercase__: Optional[Any] = weight
def __lowercase ( self) -> str:
'''simple docstring'''
lowercase__: Any = self.get_edges()
for edge in edges:
lowercase__ , lowercase__ , lowercase__: Optional[int] = edge
edges.remove((tail, head, weight))
for i in range(len(UpperCAmelCase_)):
lowercase__: Optional[int] = list(edges[i])
edges.sort(key=lambda UpperCAmelCase_: e[2])
for i in range(len(UpperCAmelCase_) - 1):
if edges[i][2] >= edges[i + 1][2]:
lowercase__: str = edges[i][2] + 1
for edge in edges:
lowercase__ , lowercase__ , lowercase__: Optional[Any] = edge
lowercase__: Optional[Any] = weight
lowercase__: Any = weight
def __str__( self) -> List[str]:
'''simple docstring'''
lowercase__: Any = ""
for tail in self.adjacency:
for head in self.adjacency[tail]:
lowercase__: Tuple = self.adjacency[head][tail]
string += F"""{head} -> {tail} == {weight}\n"""
return string.rstrip("\n")
def __lowercase ( self) -> Union[str, Any]:
'''simple docstring'''
lowercase__: Optional[int] = []
for tail in self.adjacency:
for head in self.adjacency[tail]:
output.append((tail, head, self.adjacency[head][tail]))
return output
def __lowercase ( self) -> List[str]:
'''simple docstring'''
return self.adjacency.keys()
@staticmethod
def __lowercase ( UpperCAmelCase_=None , UpperCAmelCase_=None) -> Optional[int]:
'''simple docstring'''
lowercase__: Any = Graph()
if vertices is None:
lowercase__: List[Any] = []
if edges is None:
lowercase__: Any = []
for vertex in vertices:
g.add_vertex(UpperCAmelCase_)
for edge in edges:
g.add_edge(*UpperCAmelCase_)
return g
class _a :
'''simple docstring'''
def __init__( self) -> List[str]:
'''simple docstring'''
lowercase__: str = {}
lowercase__: int = {}
def __len__( self) -> Optional[int]:
'''simple docstring'''
return len(self.parent)
def __lowercase ( self , UpperCAmelCase_) -> Any:
'''simple docstring'''
if item in self.parent:
return self.find(UpperCAmelCase_)
lowercase__: str = item
lowercase__: Any = 0
return item
def __lowercase ( self , UpperCAmelCase_) -> Tuple:
'''simple docstring'''
if item not in self.parent:
return self.make_set(UpperCAmelCase_)
if item != self.parent[item]:
lowercase__: Tuple = self.find(self.parent[item])
return self.parent[item]
def __lowercase ( self , UpperCAmelCase_ , UpperCAmelCase_) -> Union[str, Any]:
'''simple docstring'''
lowercase__: int = self.find(UpperCAmelCase_)
lowercase__: Optional[int] = self.find(UpperCAmelCase_)
if roota == roota:
return roota
if self.rank[roota] > self.rank[roota]:
lowercase__: List[Any] = roota
return roota
if self.rank[roota] < self.rank[roota]:
lowercase__: Optional[int] = roota
return roota
if self.rank[roota] == self.rank[roota]:
self.rank[roota] += 1
lowercase__: List[str] = roota
return roota
return None
@staticmethod
def __lowercase ( UpperCAmelCase_) -> Union[str, Any]:
'''simple docstring'''
lowercase__: str = graph.num_vertices
lowercase__: Optional[Any] = Graph.UnionFind()
lowercase__: List[str] = []
while num_components > 1:
lowercase__: int = {}
for vertex in graph.get_vertices():
lowercase__: Dict = -1
lowercase__: Dict = graph.get_edges()
for edge in edges:
lowercase__ , lowercase__ , lowercase__: str = edge
edges.remove((tail, head, weight))
for edge in edges:
lowercase__ , lowercase__ , lowercase__: Optional[Any] = edge
lowercase__: List[str] = union_find.find(UpperCAmelCase_)
lowercase__: List[Any] = union_find.find(UpperCAmelCase_)
if seta != seta:
if cheap_edge[seta] == -1 or cheap_edge[seta][2] > weight:
lowercase__: Optional[Any] = [head, tail, weight]
if cheap_edge[seta] == -1 or cheap_edge[seta][2] > weight:
lowercase__: Union[str, Any] = [head, tail, weight]
for vertex in cheap_edge:
if cheap_edge[vertex] != -1:
lowercase__ , lowercase__ , lowercase__: Optional[Any] = cheap_edge[vertex]
if union_find.find(UpperCAmelCase_) != union_find.find(UpperCAmelCase_):
union_find.union(UpperCAmelCase_ , UpperCAmelCase_)
mst_edges.append(cheap_edge[vertex])
lowercase__: Optional[int] = num_components - 1
lowercase__: str = Graph.build(edges=UpperCAmelCase_)
return mst
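# Compact, self-contained sketch of Boruvka's MST (an addition): the class-based version
# above reuses a single placeholder method name, so the same idea is shown here on a plain
# edge list. Assumes a connected graph with distinct edge weights, as the original does
# after its weight-adjustment step.
def boruvka_mst(num_vertices, edges):
    parent = list(range(num_vertices))

    def find(x):  # union-find with path halving
        while parent[x] != x:
            parent[x] = parent[parent[x]]
            x = parent[x]
        return x

    mst, components = [], num_vertices
    while components > 1:
        cheapest = [None] * num_vertices
        for u, v, w in edges:  # cheapest edge leaving each component
            ru, rv = find(u), find(v)
            if ru != rv:
                if cheapest[ru] is None or cheapest[ru][2] > w:
                    cheapest[ru] = (u, v, w)
                if cheapest[rv] is None or cheapest[rv][2] > w:
                    cheapest[rv] = (u, v, w)
        for edge in cheapest:
            if edge is not None and find(edge[0]) != find(edge[1]):
                parent[find(edge[0])] = find(edge[1])
                mst.append(edge)
                components -= 1
    return mst

sample_edges = [(0, 1, 1), (1, 2, 2), (0, 2, 3), (2, 3, 4)]
tree = boruvka_mst(4, sample_edges)
assert sum(w for _, _, w in tree) == 7  # picks (0,1,1), (1,2,2), (2,3,4)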
| 120
| 0
|
from unittest.mock import patch
import pyspark
from datasets.packaged_modules.spark.spark import (
Spark,
SparkExamplesIterable,
_generate_iterable_examples,
)
from ..utils import (
require_dill_gt_0_3_2,
require_not_windows,
)
def a ( A__ , A__ ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Optional[int] = []
for part_id in partition_order:
SCREAMING_SNAKE_CASE__ : Tuple = df.where(f"""SPARK_PARTITION_ID() = {part_id}""" ).collect()
for row_idx, row in enumerate(A__ ):
expected_row_ids_and_row_dicts.append((f"""{part_id}_{row_idx}""", row.asDict()) )
return expected_row_ids_and_row_dicts
@require_not_windows
@require_dill_gt_0_3_2
def a ( ) -> Optional[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Union[str, Any] = pyspark.sql.SparkSession.builder.master('''local[*]''' ).appName('''pyspark''' ).getOrCreate()
SCREAMING_SNAKE_CASE__ : str = spark.range(1_0_0 ).repartition(1 )
SCREAMING_SNAKE_CASE__ : str = Spark(A__ )
# The id ints will be converted to Pyarrow int64s, so each row will be 8 bytes. Setting a max_shard_size of 16 means
# that each partition can hold 2 rows.
spark_builder._repartition_df_if_needed(max_shard_size=1_6 )
# Given that the dataframe has 100 rows and each partition has 2 rows, we expect 50 partitions.
assert spark_builder.df.rdd.getNumPartitions() == 5_0
@require_not_windows
@require_dill_gt_0_3_2
def a ( ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Any = pyspark.sql.SparkSession.builder.master('''local[*]''' ).appName('''pyspark''' ).getOrCreate()
SCREAMING_SNAKE_CASE__ : str = spark.range(1_0 ).repartition(2 )
SCREAMING_SNAKE_CASE__ : str = [1, 0]
SCREAMING_SNAKE_CASE__ : Optional[Any] = _generate_iterable_examples(A__ , A__ ) # Reverse the partitions.
SCREAMING_SNAKE_CASE__ : Optional[Any] = _get_expected_row_ids_and_row_dicts_for_partition_order(A__ , A__ )
for i, (row_id, row_dict) in enumerate(generate_fn() ):
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : List[str] = expected_row_ids_and_row_dicts[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def a ( ) -> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Optional[Any] = pyspark.sql.SparkSession.builder.master('''local[*]''' ).appName('''pyspark''' ).getOrCreate()
SCREAMING_SNAKE_CASE__ : Dict = spark.range(1_0 ).repartition(1 )
SCREAMING_SNAKE_CASE__ : Tuple = SparkExamplesIterable(A__ )
assert it.n_shards == 1
for i, (row_id, row_dict) in enumerate(A__ ):
assert row_id == f"""0_{i}"""
assert row_dict == {"id": i}
@require_not_windows
@require_dill_gt_0_3_2
def a ( ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : List[str] = pyspark.sql.SparkSession.builder.master('''local[*]''' ).appName('''pyspark''' ).getOrCreate()
SCREAMING_SNAKE_CASE__ : Optional[Any] = spark.range(3_0 ).repartition(3 )
# Mock the generator so that shuffle reverses the partition indices.
with patch('''numpy.random.Generator''' ) as generator_mock:
SCREAMING_SNAKE_CASE__ : int = lambda A__ : x.reverse()
SCREAMING_SNAKE_CASE__ : Union[str, Any] = _get_expected_row_ids_and_row_dicts_for_partition_order(A__ , [2, 1, 0] )
SCREAMING_SNAKE_CASE__ : Dict = SparkExamplesIterable(A__ ).shuffle_data_sources(A__ )
assert shuffled_it.n_shards == 3
for i, (row_id, row_dict) in enumerate(A__ ):
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Any = expected_row_ids_and_row_dicts[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def a ( ) -> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Any = pyspark.sql.SparkSession.builder.master('''local[*]''' ).appName('''pyspark''' ).getOrCreate()
SCREAMING_SNAKE_CASE__ : List[str] = spark.range(2_0 ).repartition(4 )
# Partitions 0 and 2
SCREAMING_SNAKE_CASE__ : Optional[int] = SparkExamplesIterable(A__ ).shard_data_sources(worker_id=0 , num_workers=2 )
assert shard_it_a.n_shards == 2
SCREAMING_SNAKE_CASE__ : Optional[int] = _get_expected_row_ids_and_row_dicts_for_partition_order(A__ , [0, 2] )
for i, (row_id, row_dict) in enumerate(A__ ):
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Tuple = expected_row_ids_and_row_dicts_a[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
# Partitions 1 and 3
SCREAMING_SNAKE_CASE__ : int = SparkExamplesIterable(A__ ).shard_data_sources(worker_id=1 , num_workers=2 )
assert shard_it_a.n_shards == 2
SCREAMING_SNAKE_CASE__ : str = _get_expected_row_ids_and_row_dicts_for_partition_order(A__ , [1, 3] )
for i, (row_id, row_dict) in enumerate(A__ ):
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Any = expected_row_ids_and_row_dicts_a[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def a ( ) -> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Optional[int] = pyspark.sql.SparkSession.builder.master('''local[*]''' ).appName('''pyspark''' ).getOrCreate()
SCREAMING_SNAKE_CASE__ : Optional[int] = spark.range(1_0_0 ).repartition(1 )
SCREAMING_SNAKE_CASE__ : Optional[int] = Spark(A__ )
# Choose a small max_shard_size for maximum partitioning.
spark_builder._repartition_df_if_needed(max_shard_size=1 )
# The new number of partitions should not be greater than the number of rows.
assert spark_builder.df.rdd.getNumPartitions() == 1_0_0
| 35
|
import platform
from argparse import ArgumentParser
import huggingface_hub
from .. import __version__ as version
from ..utils import is_accelerate_available, is_torch_available, is_transformers_available, is_xformers_available
from . import BaseDiffusersCLICommand
def a ( A__ ) -> Tuple:
'''simple docstring'''
return EnvironmentCommand()
class lowercase ( _UpperCAmelCase ):
@staticmethod
def lowercase__ ( _lowercase : ArgumentParser ):
SCREAMING_SNAKE_CASE__ : Optional[int] = parser.add_parser('''env''' )
download_parser.set_defaults(func=_lowercase )
def lowercase__ ( self : List[Any] ):
SCREAMING_SNAKE_CASE__ : Tuple = huggingface_hub.__version__
SCREAMING_SNAKE_CASE__ : List[Any] = '''not installed'''
SCREAMING_SNAKE_CASE__ : List[Any] = '''NA'''
if is_torch_available():
import torch
SCREAMING_SNAKE_CASE__ : int = torch.__version__
SCREAMING_SNAKE_CASE__ : List[Any] = torch.cuda.is_available()
SCREAMING_SNAKE_CASE__ : str = '''not installed'''
if is_transformers_available():
import transformers
SCREAMING_SNAKE_CASE__ : Optional[Any] = transformers.__version__
SCREAMING_SNAKE_CASE__ : Any = '''not installed'''
if is_accelerate_available():
import accelerate
SCREAMING_SNAKE_CASE__ : Union[str, Any] = accelerate.__version__
SCREAMING_SNAKE_CASE__ : Tuple = '''not installed'''
if is_xformers_available():
import xformers
SCREAMING_SNAKE_CASE__ : Tuple = xformers.__version__
SCREAMING_SNAKE_CASE__ : Optional[Any] = {
'''`diffusers` version''': version,
'''Platform''': platform.platform(),
'''Python version''': platform.python_version(),
'''PyTorch version (GPU?)''': f"""{pt_version} ({pt_cuda_available})""",
'''Huggingface_hub version''': hub_version,
'''Transformers version''': transformers_version,
'''Accelerate version''': accelerate_version,
'''xFormers version''': xformers_version,
'''Using GPU in script?''': '''<fill in>''',
'''Using distributed or parallel set-up in script?''': '''<fill in>''',
}
print('''\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n''' )
print(self.format_dict(_lowercase ) )
return info
@staticmethod
def lowercase__ ( _lowercase : Dict ):
return "\n".join([f"""- {prop}: {val}""" for prop, val in d.items()] ) + "\n"
| 35
| 1
|
'''simple docstring'''
def SCREAMING_SNAKE_CASE ( a_ : int , a_ : int ):
__a = 1 # To kept the Calculated Value
# Since C(n, k) = C(n, n-k)
if k > (n - k):
__a = n - k
# Calculate C(n,k)
for i in range(a_ ):
result *= n - i
result //= i + 1
return result
def SCREAMING_SNAKE_CASE ( a_ : int ):
return binomial_coefficient(2 * node_count , a_ ) // (node_count + 1)
def SCREAMING_SNAKE_CASE ( a_ : int ):
if n < 0:
raise ValueError('factorial() not defined for negative values' )
__a = 1
for i in range(1 , n + 1 ):
result *= i
return result
def SCREAMING_SNAKE_CASE ( a_ : int ):
return catalan_number(a_ ) * factorial(a_ )
if __name__ == "__main__":
UpperCAmelCase_ = int(input("Enter the number of nodes: ").strip() or 0)
if node_count <= 0:
raise ValueError("We need some nodes to work with.")
print(
f"""Given {node_count} nodes, there are {binary_tree_count(node_count)} """
f"""binary trees and {catalan_number(node_count)} binary search trees."""
)
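# Illustrative cross-check (an addition): the closed form C(n) = C(2n, n) / (n + 1) used
# above agrees with the Catalan recurrence C(0) = 1, C(n+1) = sum_i C(i) * C(n-i).
# math.comb stands in for the hand-rolled binomial coefficient.
import math

def catalan_closed_form(n):
    return math.comb(2 * n, n) // (n + 1)

def catalan_by_recurrence(limit):
    cats = [1]
    for n in range(limit):
        cats.append(sum(cats[i] * cats[n - i] for i in range(n + 1)))
    return cats

assert [catalan_closed_form(n) for n in range(8)] == catalan_by_recurrence(7)
# 5 nodes -> 42 binary search trees and 42 * 5! = 5040 binary trees
assert catalan_closed_form(5) == 42 and catalan_closed_form(5) * math.factorial(5) == 5040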
| 490
|
'''simple docstring'''
from typing import List, Optional, TypeVar
from .arrow_dataset import Dataset, _concatenate_map_style_datasets, _interleave_map_style_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .info import DatasetInfo
from .iterable_dataset import IterableDataset, _concatenate_iterable_datasets, _interleave_iterable_datasets
from .splits import NamedSplit
from .utils import logging
from .utils.py_utils import Literal
UpperCAmelCase_ = logging.get_logger(__name__)
UpperCAmelCase_ = TypeVar("DatasetType", Dataset, IterableDataset)
def SCREAMING_SNAKE_CASE ( a_ : List[DatasetType] , a_ : Optional[List[float]] = None , a_ : Optional[int] = None , a_ : Optional[DatasetInfo] = None , a_ : Optional[NamedSplit] = None , a_ : Literal["first_exhausted", "all_exhausted"] = "first_exhausted" , ):
from .arrow_dataset import Dataset
from .iterable_dataset import IterableDataset
if not datasets:
raise ValueError('Unable to interleave an empty list of datasets.' )
for i, dataset in enumerate(a_ ):
if not isinstance(a_ , (Dataset, IterableDataset) ):
if isinstance(a_ , (DatasetDict, IterableDatasetDict) ):
if not dataset:
raise ValueError(
f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} "
'is an empty dataset dictionary.' )
raise ValueError(
f"Dataset at position {i} has at least one split: {list(a_ )}\n"
f"Please pick one to interleave with the other datasets, for example: dataset['{next(iter(a_ ) )}']" )
raise ValueError(
f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(a_ ).__name__}." )
if i == 0:
__a , __a = (
(Dataset, IterableDataset) if isinstance(a_ , a_ ) else (IterableDataset, Dataset)
)
elif not isinstance(a_ , a_ ):
raise ValueError(
f"Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects." )
if stopping_strategy not in ["first_exhausted", "all_exhausted"]:
raise ValueError(f"{stopping_strategy} is not supported. Please enter a valid stopping_strategy." )
if dataset_type is Dataset:
return _interleave_map_style_datasets(
a_ , a_ , a_ , info=a_ , split=a_ , stopping_strategy=a_ )
else:
return _interleave_iterable_datasets(
a_ , a_ , a_ , info=a_ , split=a_ , stopping_strategy=a_ )
def SCREAMING_SNAKE_CASE ( a_ : List[DatasetType] , a_ : Optional[DatasetInfo] = None , a_ : Optional[NamedSplit] = None , a_ : int = 0 , ):
if not dsets:
raise ValueError('Unable to concatenate an empty list of datasets.' )
for i, dataset in enumerate(a_ ):
if not isinstance(a_ , (Dataset, IterableDataset) ):
if isinstance(a_ , (DatasetDict, IterableDatasetDict) ):
if not dataset:
raise ValueError(
f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} "
'is an empty dataset dictionary.' )
raise ValueError(
f"Dataset at position {i} has at least one split: {list(a_ )}\n"
f"Please pick one to interleave with the other datasets, for example: dataset['{next(iter(a_ ) )}']" )
raise ValueError(
f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(a_ ).__name__}." )
if i == 0:
__a , __a = (
(Dataset, IterableDataset) if isinstance(a_ , a_ ) else (IterableDataset, Dataset)
)
elif not isinstance(a_ , a_ ):
raise ValueError(
f"Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects." )
if dataset_type is Dataset:
return _concatenate_map_style_datasets(a_ , info=a_ , split=a_ , axis=a_ )
else:
return _concatenate_iterable_datasets(a_ , info=a_ , split=a_ , axis=a_ )
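# Minimal usage sketch (an addition), assuming the function above corresponds to
# datasets.interleave_datasets. Without probabilities it cycles over the sources
# round-robin; with "first_exhausted" it stops once the smallest dataset runs out.
from datasets import Dataset, interleave_datasets

d1 = Dataset.from_dict({"value": [0, 1, 2]})
d2 = Dataset.from_dict({"value": [10, 11, 12, 13]})
mixed = interleave_datasets([d1, d2], stopping_strategy="first_exhausted")
print(mixed["value"])  # [0, 10, 1, 11, 2, 12]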
| 490
| 1
|
'''simple docstring'''
import argparse
import logging
import sys
from unittest.mock import patch
import run_glue_deebert
from transformers.testing_utils import TestCasePlus, get_gpu_count, require_torch_non_multi_gpu, slow
logging.basicConfig(level=logging.DEBUG)
snake_case_ : List[Any] = logging.getLogger()
def lowercase__( )-> int:
"""simple docstring"""
_UpperCamelCase = argparse.ArgumentParser()
parser.add_argument("-f" )
_UpperCamelCase = parser.parse_args()
return args.f
class A_ ( lowerCAmelCase_ ):
'''simple docstring'''
def a ( self ):
_UpperCamelCase = logging.StreamHandler(sys.stdout )
logger.addHandler(A_ )
def a ( self , A_ ):
_UpperCamelCase = get_gpu_count()
if n_gpu > 1:
pass
# XXX: doesn't quite work with n_gpu > 1 https://github.com/huggingface/transformers/issues/10560
# script = f"{self.examples_dir_str}/research_projects/deebert/run_glue_deebert.py"
# distributed_args = f"-m torch.distributed.launch --nproc_per_node={n_gpu} {script}".split()
# cmd = [sys.executable] + distributed_args + args
# execute_subprocess_async(cmd, env=self.get_env())
# XXX: test the results - need to save them first into .json file
else:
args.insert(0 , "run_glue_deebert.py" )
with patch.object(A_ , "argv" , A_ ):
_UpperCamelCase = run_glue_deebert.main()
for value in result.values():
self.assertGreaterEqual(A_ , 0.666 )
@slow
@require_torch_non_multi_gpu
def a ( self ):
_UpperCamelCase = "\n --model_type roberta\n --model_name_or_path roberta-base\n --task_name MRPC\n --do_train\n --do_eval\n --do_lower_case\n --data_dir ./tests/fixtures/tests_samples/MRPC/\n --max_seq_length 128\n --per_gpu_eval_batch_size=1\n --per_gpu_train_batch_size=8\n --learning_rate 2e-4\n --num_train_epochs 3\n --overwrite_output_dir\n --seed 42\n --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --plot_data_dir ./examples/deebert/results/\n --save_steps 0\n --overwrite_cache\n --eval_after_first_stage\n ".split()
self.run_and_check(A_ )
_UpperCamelCase = "\n --model_type roberta\n --model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --task_name MRPC\n --do_eval\n --do_lower_case\n --data_dir ./tests/fixtures/tests_samples/MRPC/\n --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --plot_data_dir ./examples/deebert/results/\n --max_seq_length 128\n --eval_each_highway\n --eval_highway\n --overwrite_cache\n --per_gpu_eval_batch_size=1\n ".split()
self.run_and_check(A_ )
_UpperCamelCase = "\n --model_type roberta\n --model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --task_name MRPC\n --do_eval\n --do_lower_case\n --data_dir ./tests/fixtures/tests_samples/MRPC/\n --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --plot_data_dir ./examples/deebert/results/\n --max_seq_length 128\n --early_exit_entropy 0.1\n --eval_highway\n --overwrite_cache\n --per_gpu_eval_batch_size=1\n ".split()
self.run_and_check(A_ )
| 138
|
'''simple docstring'''
# Usage:
# ./gen-card-allenai-wmt16.py
import os
from pathlib import Path
def lowercase__( _UpperCamelCase : Optional[Any] , _UpperCamelCase : Dict , _UpperCamelCase : int , _UpperCamelCase : Optional[int] )-> List[Any]:
"""simple docstring"""
_UpperCamelCase = {
"en": "Machine learning is great, isn't it?",
"ru": "Машинное обучение - это здорово, не так ли?",
"de": "Maschinelles Lernen ist großartig, nicht wahr?",
}
# BLUE scores as follows:
# "pair": [fairseq, transformers]
_UpperCamelCase = {
"wmt16-en-de-dist-12-1": [28.3, 27.52],
"wmt16-en-de-dist-6-1": [27.4, 27.11],
"wmt16-en-de-12-1": [26.9, 25.75],
}
_UpperCamelCase = f"{src_lang}-{tgt_lang}"
_UpperCamelCase = f"\n---\nlanguage:\n- {src_lang}\n- {tgt_lang}\nthumbnail:\ntags:\n- translation\n- wmt16\n- allenai\nlicense: apache-2.0\ndatasets:\n- wmt16\nmetrics:\n- bleu\n---\n\n# FSMT\n\n## Model description\n\nThis is a ported version of fairseq-based [wmt16 transformer](https://github.com/jungokasai/deep-shallow/) for {src_lang}-{tgt_lang}.\n\nFor more details, please, see [Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation](https://arxiv.org/abs/2006.10369).\n\nAll 3 models are available:\n\n* [wmt16-en-de-dist-12-1](https://huggingface.co/allenai/wmt16-en-de-dist-12-1)\n* [wmt16-en-de-dist-6-1](https://huggingface.co/allenai/wmt16-en-de-dist-6-1)\n* [wmt16-en-de-12-1](https://huggingface.co/allenai/wmt16-en-de-12-1)\n\n\n## Intended uses & limitations\n\n#### How to use\n\n```python\nfrom transformers import FSMTForConditionalGeneration, FSMTTokenizer\nmname = \"allenai/{model_name}\"\ntokenizer = FSMTTokenizer.from_pretrained(mname)\nmodel = FSMTForConditionalGeneration.from_pretrained(mname)\n\ninput = \"{texts[src_lang]}\"\ninput_ids = tokenizer.encode(input, return_tensors=\"pt\")\noutputs = model.generate(input_ids)\ndecoded = tokenizer.decode(outputs[0], skip_special_tokens=True)\nprint(decoded) # {texts[tgt_lang]}\n\n```\n\n#### Limitations and bias\n\n\n## Training data\n\nPretrained weights were left identical to the original model released by allenai. For more details, please, see the [paper](https://arxiv.org/abs/2006.10369).\n\n## Eval results\n\nHere are the BLEU scores:\n\nmodel | fairseq | transformers\n-------|---------|----------\n{model_name} | {scores[model_name][0]} | {scores[model_name][1]}\n\nThe score is slightly below the score reported in the paper, as the researchers don't use `sacrebleu` and measure the score on tokenized outputs. `transformers` score was measured using `sacrebleu` on detokenized outputs.\n\nThe score was calculated using this code:\n\n```bash\ngit clone https://github.com/huggingface/transformers\ncd transformers\nexport PAIR={pair}\nexport DATA_DIR=data/$PAIR\nexport SAVE_DIR=data/$PAIR\nexport BS=8\nexport NUM_BEAMS=5\nmkdir -p $DATA_DIR\nsacrebleu -t wmt16 -l $PAIR --echo src > $DATA_DIR/val.source\nsacrebleu -t wmt16 -l $PAIR --echo ref > $DATA_DIR/val.target\necho $PAIR\nPYTHONPATH=\"src:examples/seq2seq\" python examples/seq2seq/run_eval.py allenai/{model_name} $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS\n```\n\n## Data Sources\n\n- [training, etc.](http://www.statmt.org/wmt16/)\n- [test set](http://matrix.statmt.org/test_sets/newstest2016.tgz?1504722372)\n\n\n### BibTeX entry and citation info\n\n```\n@misc{{kasai2020deep,\n title={{Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation}},\n author={{Jungo Kasai and Nikolaos Pappas and Hao Peng and James Cross and Noah A. Smith}},\n year={{2020}},\n eprint={{2006.10369}},\n archivePrefix={{arXiv}},\n primaryClass={{cs.CL}}\n}}\n```\n\n"
model_card_dir.mkdir(parents=_UpperCamelCase , exist_ok=_UpperCamelCase )
_UpperCamelCase = os.path.join(_UpperCamelCase , "README.md" )
print(f"Generating {path}" )
with open(_UpperCamelCase , "w" , encoding="utf-8" ) as f:
f.write(_UpperCamelCase )
# make sure we are under the root of the project
snake_case_ : List[Any] = Path(__file__).resolve().parent.parent.parent
snake_case_ : List[Any] = repo_dir / '''model_cards'''
for model_name in ["wmt16-en-de-dist-12-1", "wmt16-en-de-dist-6-1", "wmt16-en-de-12-1"]:
snake_case_ : str = model_cards_dir / '''allenai''' / model_name
write_model_card(model_card_dir, src_lang='''en''', tgt_lang='''de''', model_name=model_name)
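The README template above keeps literal braces in the BibTeX entry by doubling them inside the f-string, while single-brace placeholders such as `{model_name}` are still interpolated. A minimal, self-contained sketch of that escaping rule (the values here are illustrative):

```python
# Doubled braces in an f-string yield literal braces in the output,
# while single braces stay interpolation placeholders.
model_name = "wmt16-en-de-dist-12-1"  # illustrative value
snippet = f"@misc{{kasai2020deep}} ported as allenai/{model_name}"
print(snippet)  # @misc{kasai2020deep} ported as allenai/wmt16-en-de-dist-12-1
```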
| 138
| 1
|
import copy
import os
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Dict, Mapping, Optional, Union
if TYPE_CHECKING:
from ...processing_utils import ProcessorMixin
from ...utils import TensorType
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__lowerCAmelCase : Optional[int] = logging.get_logger(__name__)
__lowerCAmelCase : List[Any] = {
"google/owlvit-base-patch32": "https://huggingface.co/google/owlvit-base-patch32/resolve/main/config.json",
"google/owlvit-base-patch16": "https://huggingface.co/google/owlvit-base-patch16/resolve/main/config.json",
"google/owlvit-large-patch14": "https://huggingface.co/google/owlvit-large-patch14/resolve/main/config.json",
}
class __lowerCAmelCase ( lowerCAmelCase_ ):
"""simple docstring"""
A__ : Dict = '''owlvit_text_model'''
def __init__( self : Optional[int] , _snake_case : Any=4_9408 , _snake_case : Optional[Any]=512 , _snake_case : Optional[Any]=2048 , _snake_case : str=12 , _snake_case : Dict=8 , _snake_case : Optional[Any]=16 , _snake_case : List[str]="quick_gelu" , _snake_case : Tuple=1E-5 , _snake_case : int=0.0 , _snake_case : int=0.02 , _snake_case : str=1.0 , _snake_case : str=0 , _snake_case : Union[str, Any]=4_9406 , _snake_case : List[str]=4_9407 , **_snake_case : str , ):
super().__init__(pad_token_id=_snake_case , bos_token_id=_snake_case , eos_token_id=_snake_case , **_snake_case )
__lowercase : int = vocab_size
__lowercase : List[str] = hidden_size
__lowercase : Any = intermediate_size
__lowercase : int = num_hidden_layers
__lowercase : List[Any] = num_attention_heads
__lowercase : str = max_position_embeddings
__lowercase : Any = hidden_act
__lowercase : Tuple = layer_norm_eps
__lowercase : Optional[Any] = attention_dropout
__lowercase : int = initializer_range
__lowercase : int = initializer_factor
@classmethod
def snake_case_ ( cls : int , _snake_case : Union[str, os.PathLike] , **_snake_case : Dict ):
cls._set_token_in_kwargs(_snake_case )
__lowercase , __lowercase : int = cls.get_config_dict(_snake_case , **_snake_case )
# get the text config dict if we are loading from OwlViTConfig
if config_dict.get('''model_type''' ) == "owlvit":
__lowercase : Tuple = config_dict['''text_config''']
if "model_type" in config_dict and hasattr(cls , '''model_type''' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
F'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' )
return cls.from_dict(_snake_case , **_snake_case )
class __lowerCAmelCase ( lowerCAmelCase_ ):
"""simple docstring"""
A__ : int = '''owlvit_vision_model'''
def __init__( self : List[Any] , _snake_case : Tuple=768 , _snake_case : Dict=3072 , _snake_case : Optional[Any]=12 , _snake_case : List[Any]=12 , _snake_case : Optional[Any]=3 , _snake_case : Tuple=768 , _snake_case : str=32 , _snake_case : List[Any]="quick_gelu" , _snake_case : Optional[Any]=1E-5 , _snake_case : Tuple=0.0 , _snake_case : Dict=0.02 , _snake_case : List[Any]=1.0 , **_snake_case : Optional[Any] , ):
super().__init__(**_snake_case )
__lowercase : Optional[int] = hidden_size
__lowercase : List[str] = intermediate_size
__lowercase : List[Any] = num_hidden_layers
__lowercase : Optional[Any] = num_attention_heads
__lowercase : Dict = num_channels
__lowercase : Tuple = image_size
__lowercase : Optional[int] = patch_size
__lowercase : int = hidden_act
__lowercase : Dict = layer_norm_eps
__lowercase : Optional[Any] = attention_dropout
__lowercase : Tuple = initializer_range
__lowercase : Optional[Any] = initializer_factor
@classmethod
def snake_case_ ( cls : List[Any] , _snake_case : Union[str, os.PathLike] , **_snake_case : Union[str, Any] ):
cls._set_token_in_kwargs(_snake_case )
__lowercase , __lowercase : List[Any] = cls.get_config_dict(_snake_case , **_snake_case )
# get the vision config dict if we are loading from OwlViTConfig
if config_dict.get('''model_type''' ) == "owlvit":
__lowercase : Tuple = config_dict['''vision_config''']
if "model_type" in config_dict and hasattr(cls , '''model_type''' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
F'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' )
return cls.from_dict(_snake_case , **_snake_case )
class __lowerCAmelCase ( lowerCAmelCase_ ):
"""simple docstring"""
A__ : List[str] = '''owlvit'''
A__ : List[str] = True
def __init__( self : Union[str, Any] , _snake_case : int=None , _snake_case : Any=None , _snake_case : Dict=512 , _snake_case : int=2.65_92 , _snake_case : Tuple=True , **_snake_case : List[Any] , ):
super().__init__(**_snake_case )
if text_config is None:
__lowercase : Optional[int] = {}
logger.info('''text_config is None. Initializing the OwlViTTextConfig with default values.''' )
if vision_config is None:
__lowercase : Optional[Any] = {}
logger.info('''vision_config is None. Initializing the OwlViTVisionConfig with default values.''' )
__lowercase : List[str] = OwlViTTextConfig(**_snake_case )
__lowercase : Any = OwlViTVisionConfig(**_snake_case )
__lowercase : Optional[Any] = projection_dim
__lowercase : int = logit_scale_init_value
__lowercase : Tuple = return_dict
__lowercase : Optional[Any] = 1.0
@classmethod
def snake_case_ ( cls : Any , _snake_case : Union[str, os.PathLike] , **_snake_case : int ):
cls._set_token_in_kwargs(_snake_case )
__lowercase , __lowercase : List[str] = cls.get_config_dict(_snake_case , **_snake_case )
if "model_type" in config_dict and hasattr(cls , '''model_type''' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
F'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' )
return cls.from_dict(_snake_case , **_snake_case )
@classmethod
def snake_case_ ( cls : Dict , _snake_case : Dict , _snake_case : Dict , **_snake_case : str ):
__lowercase : Union[str, Any] = {}
__lowercase : Dict = text_config
__lowercase : List[str] = vision_config
return cls.from_dict(_snake_case , **_snake_case )
def snake_case_ ( self : Optional[Any] ):
__lowercase : Optional[Any] = copy.deepcopy(self.__dict__ )
__lowercase : List[Any] = self.text_config.to_dict()
__lowercase : List[str] = self.vision_config.to_dict()
__lowercase : Tuple = self.__class__.model_type
return output
class __lowerCAmelCase ( lowerCAmelCase_ ):
"""simple docstring"""
@property
def snake_case_ ( self : Union[str, Any] ):
return OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''sequence'''}),
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
('''attention_mask''', {0: '''batch''', 1: '''sequence'''}),
] )
@property
def snake_case_ ( self : int ):
return OrderedDict(
[
('''logits_per_image''', {0: '''batch'''}),
('''logits_per_text''', {0: '''batch'''}),
('''text_embeds''', {0: '''batch'''}),
('''image_embeds''', {0: '''batch'''}),
] )
@property
def snake_case_ ( self : Any ):
return 1E-4
def snake_case_ ( self : Union[str, Any] , _snake_case : "ProcessorMixin" , _snake_case : int = -1 , _snake_case : int = -1 , _snake_case : Optional["TensorType"] = None , ):
__lowercase : List[str] = super().generate_dummy_inputs(
processor.tokenizer , batch_size=_snake_case , seq_length=_snake_case , framework=_snake_case )
__lowercase : List[Any] = super().generate_dummy_inputs(
processor.image_processor , batch_size=_snake_case , framework=_snake_case )
return {**text_input_dict, **image_input_dict}
@property
def snake_case_ ( self : Dict ):
return 14
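A short usage sketch for the configuration classes above, assuming the standard `transformers` package is installed; it only exercises default construction, `from_text_vision_configs` (which, as in the classmethod above, takes plain dicts), and `to_dict` serialization:

```python
from transformers import OwlViTConfig, OwlViTTextConfig, OwlViTVisionConfig

# Sub-configs with default hyperparameters for the text and vision towers.
text_config = OwlViTTextConfig()
vision_config = OwlViTVisionConfig()

# from_text_vision_configs expects plain dicts, matching the classmethod above.
config = OwlViTConfig.from_text_vision_configs(text_config.to_dict(), vision_config.to_dict())

# to_dict() re-serializes the nested sub-configs and records the parent model_type.
as_dict = config.to_dict()
assert as_dict["model_type"] == "owlvit"
assert as_dict["text_config"]["vocab_size"] == text_config.vocab_size
```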
| 284
|
import unittest
from queue import Empty
from threading import Thread
from transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer, is_torch_available
from transformers.testing_utils import CaptureStdout, require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers import AutoModelForCausalLM
@require_torch
class __lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def snake_case_ ( self : Union[str, Any] ):
__lowercase : Optional[Any] = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
__lowercase : Any = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(_snake_case )
__lowercase : List[str] = -1
__lowercase : Optional[Any] = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(_snake_case )
__lowercase : Tuple = model.generate(_snake_case , max_new_tokens=10 , do_sample=_snake_case )
__lowercase : Optional[int] = tokenizer.decode(greedy_ids[0] )
with CaptureStdout() as cs:
__lowercase : Optional[Any] = TextStreamer(_snake_case )
model.generate(_snake_case , max_new_tokens=10 , do_sample=_snake_case , streamer=_snake_case )
# The greedy text should be printed to stdout, except for the final "\n" in the streamer
__lowercase : List[Any] = cs.out[:-1]
self.assertEqual(_snake_case , _snake_case )
def snake_case_ ( self : Optional[int] ):
__lowercase : Optional[int] = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
__lowercase : Optional[Any] = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(_snake_case )
__lowercase : List[str] = -1
__lowercase : Tuple = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(_snake_case )
__lowercase : List[Any] = model.generate(_snake_case , max_new_tokens=10 , do_sample=_snake_case )
__lowercase : Optional[Any] = tokenizer.decode(greedy_ids[0] )
__lowercase : int = TextIteratorStreamer(_snake_case )
__lowercase : int = {'''input_ids''': input_ids, '''max_new_tokens''': 10, '''do_sample''': False, '''streamer''': streamer}
__lowercase : Optional[Any] = Thread(target=model.generate , kwargs=_snake_case )
thread.start()
__lowercase : Optional[int] = ''''''
for new_text in streamer:
streamer_text += new_text
self.assertEqual(_snake_case , _snake_case )
def snake_case_ ( self : List[str] ):
__lowercase : Dict = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
__lowercase : int = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(_snake_case )
__lowercase : List[Any] = -1
__lowercase : List[Any] = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(_snake_case )
__lowercase : str = model.generate(_snake_case , max_new_tokens=10 , do_sample=_snake_case )
__lowercase : List[Any] = greedy_ids[:, input_ids.shape[1] :]
__lowercase : Optional[Any] = tokenizer.decode(new_greedy_ids[0] )
with CaptureStdout() as cs:
__lowercase : int = TextStreamer(_snake_case , skip_prompt=_snake_case )
model.generate(_snake_case , max_new_tokens=10 , do_sample=_snake_case , streamer=_snake_case )
# The greedy text should be printed to stdout, except for the final "\n" in the streamer
__lowercase : int = cs.out[:-1]
self.assertEqual(_snake_case , _snake_case )
def snake_case_ ( self : Any ):
# Tests that we can pass `decode_kwargs` to the streamer to control how the tokens are decoded. Must be tested
# with actual models -- the dummy models' tokenizers are not aligned with their models, and
# `skip_special_tokens=True` has no effect on them
__lowercase : Dict = AutoTokenizer.from_pretrained('''distilgpt2''' )
__lowercase : List[str] = AutoModelForCausalLM.from_pretrained('''distilgpt2''' ).to(_snake_case )
__lowercase : str = -1
__lowercase : Dict = torch.ones((1, 5) , device=_snake_case ).long() * model.config.bos_token_id
with CaptureStdout() as cs:
__lowercase : Optional[Any] = TextStreamer(_snake_case , skip_special_tokens=_snake_case )
model.generate(_snake_case , max_new_tokens=1 , do_sample=_snake_case , streamer=_snake_case )
# The prompt contains a special token, so the streamer should not print it. As such, the output text, when
# re-tokenized, must only contain one token
__lowercase : Dict = cs.out[:-1] # Remove the final "\n"
__lowercase : Any = tokenizer(_snake_case , return_tensors='''pt''' )
self.assertEqual(streamer_text_tokenized.input_ids.shape , (1, 1) )
def snake_case_ ( self : Any ):
__lowercase : List[str] = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
__lowercase : Union[str, Any] = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(_snake_case )
__lowercase : Dict = -1
__lowercase : List[Any] = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(_snake_case )
__lowercase : Tuple = TextIteratorStreamer(_snake_case , timeout=0.0_01 )
__lowercase : Tuple = {'''input_ids''': input_ids, '''max_new_tokens''': 10, '''do_sample''': False, '''streamer''': streamer}
__lowercase : Optional[int] = Thread(target=model.generate , kwargs=_snake_case )
thread.start()
# The streamer will timeout after 0.001 seconds, so an exception will be raised
with self.assertRaises(_snake_case ):
__lowercase : List[Any] = ''''''
for new_text in streamer:
streamer_text += new_text
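Outside a test harness, the iterator-based streamer is typically driven the same way these tests do it: generation runs in a background thread while the main thread consumes decoded text chunks. A minimal sketch, assuming the same tiny test checkpoint is reachable:

```python
from threading import Thread

from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer

tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2")

inputs = tokenizer("An example prompt", return_tensors="pt")
streamer = TextIteratorStreamer(tokenizer, skip_prompt=True, skip_special_tokens=True)

# generate() blocks, so it runs in a worker thread; the streamer yields text as it is produced.
generation_kwargs = dict(**inputs, max_new_tokens=10, do_sample=False, streamer=streamer)
thread = Thread(target=model.generate, kwargs=generation_kwargs)
thread.start()

for chunk in streamer:
    print(chunk, end="", flush=True)
thread.join()
```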
| 284
| 1
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
SCREAMING_SNAKE_CASE_ = {"""configuration_opt""": ["""OPT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """OPTConfig"""]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE_ = [
"""OPT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""OPTForCausalLM""",
"""OPTModel""",
"""OPTPreTrainedModel""",
"""OPTForSequenceClassification""",
"""OPTForQuestionAnswering""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE_ = ["""TFOPTForCausalLM""", """TFOPTModel""", """TFOPTPreTrainedModel"""]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE_ = [
"""FlaxOPTForCausalLM""",
"""FlaxOPTModel""",
"""FlaxOPTPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_opt import OPT_PRETRAINED_CONFIG_ARCHIVE_MAP, OPTConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_opt import (
OPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OPTForCausalLM,
OPTForQuestionAnswering,
OPTForSequenceClassification,
OPTModel,
OPTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_opt import TFOPTForCausalLM, TFOPTModel, TFOPTPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_opt import FlaxOPTForCausalLM, FlaxOPTModel, FlaxOPTPreTrainedModel
else:
import sys
SCREAMING_SNAKE_CASE_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
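The module above follows the lazy-import pattern used across the library: importable names are declared up front, type checkers see the real imports, and the final `_LazyModule(...)` call (which expects those declarations gathered in a single `_import_structure` dictionary) defers the heavy imports until first access. The same effect can be sketched in plain Python with a module-level `__getattr__` (PEP 562); the mapping below targets a stdlib module only so the sketch runs as-is when imported:

```python
import importlib
from typing import TYPE_CHECKING

# Map public names to the module that actually defines them.
_import_structure = {"json": ["dumps", "loads"]}

if TYPE_CHECKING:
    # Static type checkers see the real imports.
    from json import dumps, loads  # noqa: F401
else:
    def __getattr__(name):
        # Module-level __getattr__ (PEP 562): resolve the symbol lazily on first access.
        for module_name, symbols in _import_structure.items():
            if name in symbols:
                return getattr(importlib.import_module(module_name), name)
        raise AttributeError(f"module {__name__!r} has no attribute {name!r}")
```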
| 237
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
SCREAMING_SNAKE_CASE_ = {
"""configuration_pix2struct""": [
"""PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""Pix2StructConfig""",
"""Pix2StructTextConfig""",
"""Pix2StructVisionConfig""",
],
"""processing_pix2struct""": ["""Pix2StructProcessor"""],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE_ = ["""Pix2StructImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE_ = [
"""PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""Pix2StructPreTrainedModel""",
"""Pix2StructForConditionalGeneration""",
"""Pix2StructVisionModel""",
"""Pix2StructTextModel""",
]
if TYPE_CHECKING:
from .configuration_pixastruct import (
PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP,
PixaStructConfig,
PixaStructTextConfig,
PixaStructVisionConfig,
)
from .processing_pixastruct import PixaStructProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_pixastruct import PixaStructImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_pixastruct import (
PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST,
PixaStructForConditionalGeneration,
PixaStructPreTrainedModel,
PixaStructTextModel,
PixaStructVisionModel,
)
else:
import sys
SCREAMING_SNAKE_CASE_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 237
| 1
|
def lowercase ( __A : int = 100 ) -> Optional[int]:
'''simple docstring'''
snake_case : Union[str, Any] = 0
snake_case : Union[str, Any] = 0
for i in range(1 , n + 1 ):
sum_of_squares += i**2
sum_of_ints += i
return sum_of_ints**2 - sum_of_squares
if __name__ == "__main__":
print(f'''{solution() = }''')
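The same quantity can be computed in constant time from the closed forms Σi = n(n+1)/2 and Σi² = n(n+1)(2n+1)/6, which also gives a quick cross-check of the loop-based version:

```python
def solution_closed_form(n: int = 100) -> int:
    # Difference between the square of the sum and the sum of the squares of 1..n.
    sum_of_ints = n * (n + 1) // 2
    sum_of_squares = n * (n + 1) * (2 * n + 1) // 6
    return sum_of_ints**2 - sum_of_squares

assert solution_closed_form(10) == 2640
print(solution_closed_form(100))  # 25164150
```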
| 719
|
def lowercase ( __A : int = 100_0000 ) -> int:
'''simple docstring'''
snake_case : Union[str, Any] = [i - 1 for i in range(limit + 1 )]
for i in range(2 , limit + 1 ):
if phi[i] == i - 1:
for j in range(2 * i , limit + 1 , __A ):
phi[j] -= phi[j] // i
return sum(phi[2 : limit + 1] )
if __name__ == "__main__":
print(solution())
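The sieve above follows the standard totient-sieve pattern: summing Euler's phi(d) for 2 ≤ d ≤ limit counts the reduced proper fractions with denominator at most `limit`. A brute-force cross-check of that count for small limits (far too slow for 1_000_000, but handy for validation):

```python
from math import gcd

def totient_sum_naive(limit: int) -> int:
    # Count fractions n/d with 0 < n < d <= limit and gcd(n, d) == 1.
    return sum(1 for d in range(2, limit + 1) for n in range(1, d) if gcd(n, d) == 1)

assert totient_sum_naive(8) == 21  # expected value for limit = 8
```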
| 315
| 0
|
from __future__ import annotations
def __lowerCamelCase ( __a :list[int | float] , __a :int , __a :int ) -> int | float:
"""simple docstring"""
if len(__a ) == 0:
raise ValueError("""find_max() arg is an empty sequence""" )
if (
left >= len(__a )
or left < -len(__a )
or right >= len(__a )
or right < -len(__a )
):
raise IndexError("""list index out of range""" )
if left == right:
return nums[left]
A__ = (left + right) >> 1 # the middle
A__ = find_max(__a , __a , __a ) # find max in range[left, mid]
A__ = find_max(__a , mid + 1 , __a ) # find max in range[mid + 1, right]
return left_max if left_max >= right_max else right_max
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
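A short usage sketch for the `find_max` helper referenced in the recursive calls above: the whole list is searched by passing the first and last valid indices, and negative indices are accepted as well (assuming the helper is exposed under that name with the signature `find_max(nums, left, right)`):

```python
nums = [3.0, -1, 7, 4, 2]
# Search the full range [0, len(nums) - 1].
print(find_max(nums, 0, len(nums) - 1))  # 7
# Negative indices stay within the documented bounds check.
print(find_max(nums, -len(nums), -1))    # 7
```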
| 176
|
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import CLIPImageProcessor, CLIPProcessor
@require_vision
class A (unittest.TestCase ):
'''simple docstring'''
def a_ ( self : Optional[int] ) -> Dict:
"""simple docstring"""
A__ = tempfile.mkdtemp()
# fmt: off
A__ = ["""l""", """o""", """w""", """e""", """r""", """s""", """t""", """i""", """d""", """n""", """lo""", """l</w>""", """w</w>""", """r</w>""", """t</w>""", """low</w>""", """er</w>""", """lowest</w>""", """newer</w>""", """wider""", """<unk>""", """<|startoftext|>""", """<|endoftext|>"""]
# fmt: on
A__ = dict(zip(__lowerCAmelCase , range(len(__lowerCAmelCase ) ) ) )
A__ = ["""#version: 0.2""", """l o""", """lo w</w>""", """e r</w>""", """"""]
A__ = {"""unk_token""": """<unk>"""}
A__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
A__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write(json.dumps(__lowerCAmelCase ) + """\n""" )
with open(self.merges_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write("""\n""".join(__lowerCAmelCase ) )
A__ = {
"""do_resize""": True,
"""size""": 20,
"""do_center_crop""": True,
"""crop_size""": 18,
"""do_normalize""": True,
"""image_mean""": [0.4_8_1_4_5_4_6_6, 0.4_5_7_8_2_7_5, 0.4_0_8_2_1_0_7_3],
"""image_std""": [0.2_6_8_6_2_9_5_4, 0.2_6_1_3_0_2_5_8, 0.2_7_5_7_7_7_1_1],
}
A__ = os.path.join(self.tmpdirname , __lowerCAmelCase )
with open(self.image_processor_file , """w""" , encoding="""utf-8""" ) as fp:
json.dump(__lowerCAmelCase , __lowerCAmelCase )
def a_ ( self : str , **__lowerCAmelCase : Optional[int] ) -> str:
"""simple docstring"""
return CLIPTokenizer.from_pretrained(self.tmpdirname , **__lowerCAmelCase )
def a_ ( self : Dict , **__lowerCAmelCase : List[Any] ) -> Optional[int]:
"""simple docstring"""
return CLIPTokenizerFast.from_pretrained(self.tmpdirname , **__lowerCAmelCase )
def a_ ( self : List[Any] , **__lowerCAmelCase : List[Any] ) -> Tuple:
"""simple docstring"""
return CLIPImageProcessor.from_pretrained(self.tmpdirname , **__lowerCAmelCase )
def a_ ( self : Optional[Any] ) -> str:
"""simple docstring"""
shutil.rmtree(self.tmpdirname )
def a_ ( self : int ) -> Tuple:
"""simple docstring"""
A__ = [np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uinta )]
A__ = [Image.fromarray(np.moveaxis(__lowerCAmelCase , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def a_ ( self : List[str] ) -> Optional[int]:
"""simple docstring"""
A__ = self.get_tokenizer()
A__ = self.get_rust_tokenizer()
A__ = self.get_image_processor()
A__ = CLIPProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase )
processor_slow.save_pretrained(self.tmpdirname )
A__ = CLIPProcessor.from_pretrained(self.tmpdirname , use_fast=__lowerCAmelCase )
A__ = CLIPProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase )
processor_fast.save_pretrained(self.tmpdirname )
A__ = CLIPProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , __lowerCAmelCase )
self.assertIsInstance(processor_fast.tokenizer , __lowerCAmelCase )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , __lowerCAmelCase )
self.assertIsInstance(processor_fast.image_processor , __lowerCAmelCase )
def a_ ( self : int ) -> Any:
"""simple docstring"""
A__ = CLIPProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
A__ = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" )
A__ = self.get_image_processor(do_normalize=__lowerCAmelCase , padding_value=1.0 )
A__ = CLIPProcessor.from_pretrained(
self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=__lowerCAmelCase , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , __lowerCAmelCase )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , __lowerCAmelCase )
def a_ ( self : List[Any] ) -> Union[str, Any]:
"""simple docstring"""
A__ = self.get_image_processor()
A__ = self.get_tokenizer()
A__ = CLIPProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase )
A__ = self.prepare_image_inputs()
A__ = image_processor(__lowerCAmelCase , return_tensors="""np""" )
A__ = processor(images=__lowerCAmelCase , return_tensors="""np""" )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1e-2 )
def a_ ( self : Union[str, Any] ) -> Tuple:
"""simple docstring"""
A__ = self.get_image_processor()
A__ = self.get_tokenizer()
A__ = CLIPProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase )
A__ = """lower newer"""
A__ = processor(text=__lowerCAmelCase )
A__ = tokenizer(__lowerCAmelCase )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def a_ ( self : str ) -> Optional[Any]:
"""simple docstring"""
A__ = self.get_image_processor()
A__ = self.get_tokenizer()
A__ = CLIPProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase )
A__ = """lower newer"""
A__ = self.prepare_image_inputs()
A__ = processor(text=__lowerCAmelCase , images=__lowerCAmelCase )
self.assertListEqual(list(inputs.keys() ) , ["""input_ids""", """attention_mask""", """pixel_values"""] )
# test if it raises when no input is passed
with pytest.raises(__lowerCAmelCase ):
processor()
def a_ ( self : Optional[int] ) -> Tuple:
"""simple docstring"""
A__ = self.get_image_processor()
A__ = self.get_tokenizer()
A__ = CLIPProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase )
A__ = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
A__ = processor.batch_decode(__lowerCAmelCase )
A__ = tokenizer.batch_decode(__lowerCAmelCase )
self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase )
def a_ ( self : Dict ) -> str:
"""simple docstring"""
A__ = self.get_image_processor()
A__ = self.get_tokenizer()
A__ = CLIPProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase )
A__ = """lower newer"""
A__ = self.prepare_image_inputs()
A__ = processor(text=__lowerCAmelCase , images=__lowerCAmelCase )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
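Outside the test fixtures, the processor bundles the tokenizer and image processor behind a single call; a minimal sketch, assuming the public `openai/clip-vit-base-patch32` checkpoint (and `torch`, for `return_tensors="pt"`) is available:

```python
import numpy as np
from PIL import Image

from transformers import CLIPProcessor

processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")

# A dummy RGB image stands in for a real photo.
image = Image.fromarray(np.random.randint(0, 255, (224, 224, 3), dtype=np.uint8))

# Text goes through the tokenizer, images through the image processor, in one call.
inputs = processor(text=["a photo of a cat", "a photo of a dog"], images=image,
                   return_tensors="pt", padding=True)
print(sorted(inputs.keys()))  # ['attention_mask', 'input_ids', 'pixel_values']
```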
| 176
| 1
|
'''simple docstring'''
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from argparse import ArgumentParser
from accelerate.commands.config import get_config_parser
from accelerate.commands.env import env_command_parser
from accelerate.commands.launch import launch_command_parser
from accelerate.commands.test import test_command_parser
from accelerate.commands.tpu import tpu_command_parser
def __UpperCAmelCase ( ):
_UpperCAmelCase : int = ArgumentParser("Accelerate CLI tool", usage="accelerate <command> [<args>]", allow_abbrev=a_ )
_UpperCAmelCase : Union[str, Any] = parser.add_subparsers(help="accelerate command helpers" )
# Register commands
get_config_parser(subparsers=a_ )
env_command_parser(subparsers=a_ )
launch_command_parser(subparsers=a_ )
tpu_command_parser(subparsers=a_ )
test_command_parser(subparsers=a_ )
# Let's go
_UpperCAmelCase : List[Any] = parser.parse_args()
if not hasattr(a_, "func" ):
parser.print_help()
exit(1 )
# Run
args.func(a_ )
if __name__ == "__main__":
main()
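The `args.func(args)` dispatch works because each sub-command parser attaches its entry point via `set_defaults(func=...)` when it registers itself. A stripped-down sketch of the same pattern with a made-up command:

```python
from argparse import ArgumentParser

def hello_command(args):
    print(f"hello, {args.name}")

def main():
    parser = ArgumentParser("demo CLI", usage="demo <command> [<args>]")
    subparsers = parser.add_subparsers(help="demo command helpers")

    # Each sub-command registers its own arguments and entry point.
    hello = subparsers.add_parser("hello")
    hello.add_argument("--name", default="world")
    hello.set_defaults(func=hello_command)

    args = parser.parse_args()
    if not hasattr(args, "func"):
        parser.print_help()
        exit(1)
    args.func(args)

if __name__ == "__main__":
    main()
```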
| 257
|
'''simple docstring'''
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from argparse import ArgumentParser
from accelerate.commands.config import get_config_parser
from accelerate.commands.env import env_command_parser
from accelerate.commands.launch import launch_command_parser
from accelerate.commands.test import test_command_parser
from accelerate.commands.tpu import tpu_command_parser
def __UpperCAmelCase ( ):
_UpperCAmelCase : int = ArgumentParser("Accelerate CLI tool", usage="accelerate <command> [<args>]", allow_abbrev=a_ )
_UpperCAmelCase : Union[str, Any] = parser.add_subparsers(help="accelerate command helpers" )
# Register commands
get_config_parser(subparsers=a_ )
env_command_parser(subparsers=a_ )
launch_command_parser(subparsers=a_ )
tpu_command_parser(subparsers=a_ )
test_command_parser(subparsers=a_ )
# Let's go
_UpperCAmelCase : List[Any] = parser.parse_args()
if not hasattr(a_, "func" ):
parser.print_help()
exit(1 )
# Run
args.func(a_ )
if __name__ == "__main__":
main()
| 257
| 1
|